From 12405de032020ca9706c632c71085f4f80242168 Mon Sep 17 00:00:00 2001 From: odow Date: Fri, 4 Nov 2022 11:00:12 +1300 Subject: [PATCH 01/23] Change MutableArithmetics.rewrite to move_factors_into_sums=false --- Project.toml | 2 +- docs/src/manual/expressions.md | 2 +- src/complement.jl | 2 +- src/macros.jl | 14 +++++++------- test/test_macros.jl | 2 +- test/test_print.jl | 2 +- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Project.toml b/Project.toml index 9e040d7d24e..4e4fc6b1785 100644 --- a/Project.toml +++ b/Project.toml @@ -21,7 +21,7 @@ JuMPDimensionalDataExt = "DimensionalData" [compat] DimensionalData = "0.24" MathOptInterface = "1.18" -MutableArithmetics = "1" +MutableArithmetics = "1.1" OrderedCollections = "1" SnoopPrecompile = "1" julia = "1.6" diff --git a/docs/src/manual/expressions.md b/docs/src/manual/expressions.md index 3afff752ee2..56023a8c084 100644 --- a/docs/src/manual/expressions.md +++ b/docs/src/manual/expressions.md @@ -195,7 +195,7 @@ julia> @variable(model, y) y julia> ex = @expression(model, x^2 + 2 * x * y + y^2 + x + y - 1) -x² + 2 y*x + y² + x + y - 1 +x² + 2 x*y + y² + x + y - 1 ``` ### Operator overloading diff --git a/src/complement.jl b/src/complement.jl index e3de1ce2175..f92ca45827c 100644 --- a/src/complement.jl +++ b/src/complement.jl @@ -70,6 +70,6 @@ function parse_constraint_call( F, x, ) - f, parse_code = _MA.rewrite(F) + f, parse_code = _MA.rewrite(F; move_factors_into_sums = false) return parse_code, :(_build_complements_constraint($errorf, $f, $(esc(x)))) end diff --git a/src/macros.jl b/src/macros.jl index 9d6545afac9..0740f64c562 100644 --- a/src/macros.jl +++ b/src/macros.jl @@ -501,9 +501,9 @@ function parse_constraint_head( "`$ub >= ... >= $lb`.", ) end - new_aff, parse_aff = _MA.rewrite(aff) - new_lb, parse_lb = _MA.rewrite(lb) - new_ub, parse_ub = _MA.rewrite(ub) + new_aff, parse_aff = _MA.rewrite(aff; move_factors_into_sums = false) + new_lb, parse_lb = _MA.rewrite(lb; move_factors_into_sums = false) + new_ub, parse_ub = _MA.rewrite(ub; move_factors_into_sums = false) parse_code = quote $parse_aff $parse_lb @@ -584,7 +584,7 @@ function parse_constraint_call( func, set, ) - f, parse_code = _MA.rewrite(func) + f, parse_code = _MA.rewrite(func; move_factors_into_sums = false) build_call = if vectorized :(build_constraint.($_error, _desparsify($f), Ref($(esc(set))))) else @@ -618,7 +618,7 @@ function parse_constraint_call( rhs, ) func = vectorized ? :($lhs .- $rhs) : :($lhs - $rhs) - f, parse_code = _MA.rewrite(func) + f, parse_code = _MA.rewrite(func; move_factors_into_sums = false) set = operator_to_set(_error, operator) # `_functionize` deals with the pathological case where the `lhs` is a # `VariableRef` and the `rhs` is a summation with no terms. @@ -1590,7 +1590,7 @@ macro objective(model, args...) end sense, x = args sense_expr = _moi_sense(_error, sense) - newaff, parsecode = _MA.rewrite(x) + newaff, parsecode = _MA.rewrite(x; move_factors_into_sums = false) code = quote $parsecode # Don't leak a `_MA.Zero` if the objective expression is an empty @@ -1679,7 +1679,7 @@ macro expression(args...) "different name for the index.", ) end - code = _MA.rewrite_and_return(x) + code = _MA.rewrite_and_return(x; move_factors_into_sums = false) code = quote # Don't leak a `_MA.Zero` if the expression is an empty summation, or # other structure that returns `_MA.Zero()`. 
diff --git a/test/test_macros.jl b/test/test_macros.jl index ab2b38fa46e..51729c34c25 100644 --- a/test/test_macros.jl +++ b/test/test_macros.jl @@ -619,7 +619,7 @@ end function test_Error_on_unexpected_comparison() m = Model() @variable(m, x) - @test_macro_throws ErrorException @expression(m, x <= 1) + @test_throws ErrorException @expression(m, x <= 1) return end diff --git a/test/test_print.jl b/test/test_print.jl index 790db8a1c40..649c268fc03 100644 --- a/test/test_print.jl +++ b/test/test_print.jl @@ -180,7 +180,7 @@ function test_printing_expressions() "x_{1}\\times y_{2,2} + x_{2}\\times y_{2,2} + z$ijulia_sq + 3 x_{1} + 3 x_{2} - 1", ) - ex = @expression(mod, -z * x[1] - x[1] * z + x[1] * x[2] + 0 * z^2) + ex = @expression(mod, -z * x[1] - z * x[1] + x[1] * x[2] + 0 * z^2) io_test(MIME("text/plain"), ex, "-2 z*x[1] + x[1]*x[2]") io_test(MIME("text/latex"), ex, "-2 z\\times x_{1} + x_{1}\\times x_{2}") From 428f2f1b81712504c242407e6fe0b30433019dfc Mon Sep 17 00:00:00 2001 From: odow Date: Mon, 10 Oct 2022 16:48:12 +1300 Subject: [PATCH 02/23] WIP: [Nonlinear] begin experiments with NonlinearExpr This PR is an experiment with different representations of a nonlinear expression type for JuMP. As a naive first pass, we replicate the structure of basic Julia Expr objects. The motivation for this is that we already create these in the macros, and it seems to work okay, so why not make it official! Once I get benchmarks setup, etc, we may need to try something else, but this is a good reference implementation. --- docs/src/manual/nlp.md | 38 ---- src/JuMP.jl | 2 + src/mutable_arithmetics.jl | 20 +- src/nlp.jl | 11 + src/nlp_expr.jl | 280 +++++++++++++++++++++++ src/operators.jl | 94 +------- src/print.jl | 4 + test/nlp_expr.jl | 348 ++++++++++++++++++++++++++++ test/perf/nonlinear_expr.jl | 440 ++++++++++++++++++++++++++++++++++++ test/perf/nonlinear_expr.py | 143 ++++++++++++ test/test_operator.jl | 48 ++-- 11 files changed, 1277 insertions(+), 151 deletions(-) create mode 100644 src/nlp_expr.jl create mode 100644 test/nlp_expr.jl create mode 100644 test/perf/nonlinear_expr.jl create mode 100644 test/perf/nonlinear_expr.py diff --git a/docs/src/manual/nlp.md b/docs/src/manual/nlp.md index 56bccdc92f7..1eefe60c50d 100644 --- a/docs/src/manual/nlp.md +++ b/docs/src/manual/nlp.md @@ -172,23 +172,6 @@ julia> value.(p) 3.0 ``` -Nonlinear parameters can be used *within nonlinear macros* only: - -```jldoctest nonlinear_parameters -julia> @objective(model, Max, p[1] * x) -ERROR: MethodError: no method matching *(::NonlinearParameter, ::VariableRef) -[...] - -julia> @NLobjective(model, Max, p[1] * x) - -julia> @expression(model, my_expr, p[1] * x^2) -ERROR: MethodError: no method matching *(::NonlinearParameter, ::QuadExpr) -[...] - -julia> @NLexpression(model, my_nl_expr, p[1] * x^2) -subexpression[1]: parameter[1] * x ^ 2.0 -``` - ### When to use a parameter Nonlinear parameters are useful when solving nonlinear models in a sequence: @@ -220,27 +203,6 @@ nothing #hide The syntax accepted in nonlinear macros is more restricted than the syntax for linear and quadratic macros. We note some important points below. -### No operator overloading - -There is no operator overloading provided to build up nonlinear expressions. -For example, if `x` is a JuMP variable, the code `3x` will return an -`AffExpr` object that can be used inside of future expressions and linear -constraints. However, the code `sin(x)` is an error. All nonlinear -expressions must be inside of macros. 
- -```jldoctest -julia> model = Model(); - -julia> @variable(model, x); - -julia> expr = sin(x) + 1 -ERROR: sin is not defined for type AbstractVariableRef. Are you trying to build a nonlinear problem? Make sure you use @NLconstraint/@NLobjective. -[...] - -julia> expr = @NLexpression(model, sin(x) + 1) -subexpression[1]: sin(x) + 1.0 -``` - ### Scalar operations only Except for the splatting syntax discussed below, all expressions diff --git a/src/JuMP.jl b/src/JuMP.jl index e9328ad52c1..88a930a8f80 100644 --- a/src/JuMP.jl +++ b/src/JuMP.jl @@ -1079,6 +1079,8 @@ include("objective.jl") include("aff_expr.jl") include("quad_expr.jl") include("nlp.jl") +include("nlp_expr.jl") + include("macros.jl") include("optimizer_interface.jl") diff --git a/src/mutable_arithmetics.jl b/src/mutable_arithmetics.jl index 3a51dd85ce1..a07c16587cb 100644 --- a/src/mutable_arithmetics.jl +++ b/src/mutable_arithmetics.jl @@ -286,7 +286,10 @@ end function _MA.add_mul(lhs::AbstractJuMPScalar, x::_Scalar, y::_Scalar) T = _MA.promote_operation(_MA.add_mul, typeof(lhs), typeof(x), typeof(y)) expr = _MA.operate(convert, T, lhs) - return _MA.operate!(_MA.add_mul, expr, x, y) + if _MA.mutability(T) == _MA.IsMutable() + return _MA.operate!(_MA.add_mul, expr, x, y) + end + return expr + _MA.operate(*, x, y) end function _MA.add_mul( @@ -303,13 +306,19 @@ function _MA.add_mul( typeof.(args)..., ) expr = _MA.operate(convert, T, lhs) - return _MA.operate!(_MA.add_mul, expr, x, y, args...) + if _MA.mutability(T) == _MA.IsMutable() + return _MA.operate!(_MA.add_mul, expr, x, y, args...) + end + return expr + _MA.operate(*, x, y, args...) end function _MA.sub_mul(lhs::AbstractJuMPScalar, x::_Scalar, y::_Scalar) T = _MA.promote_operation(_MA.sub_mul, typeof(lhs), typeof(x), typeof(y)) expr = _MA.operate(convert, T, lhs) - return _MA.operate!(_MA.sub_mul, expr, x, y) + if _MA.mutability(T) == _MA.IsMutable() + return _MA.operate!(_MA.sub_mul, expr, x, y) + end + return expr - _MA.operate(*, x, y) end function _MA.sub_mul( @@ -326,5 +335,8 @@ function _MA.sub_mul( typeof.(args)..., ) expr = _MA.operate(convert, T, lhs) - return _MA.operate!(_MA.sub_mul, expr, x, y, args...) + if _MA.mutability(T) == _MA.IsMutable() + return _MA.operate!(_MA.sub_mul, expr, x, y, args...) + end + return expr - _MA.operate(*, x, y, args...) end diff --git a/src/nlp.jl b/src/nlp.jl index 47af4693a7d..205f5e86902 100644 --- a/src/nlp.jl +++ b/src/nlp.jl @@ -25,6 +25,8 @@ end function _init_NLP(model::GenericModel{Float64}) if model.nlp_model === nothing model.nlp_model = MOI.Nonlinear.Model() + # TODO(odow): move this into MOI + model.ext[:nlp_constraint_names] = Dict{NonlinearConstraintRef,String}() end return end @@ -551,6 +553,15 @@ function value(var_value::Function, c::NonlinearConstraintRef) ) end +function name(c::NonlinearConstraintRef) + return get(c.model.ext[:nlp_constraint_names], c, "") +end + +function set_name(c::NonlinearConstraintRef, name::String) + c.model.ext[:nlp_constraint_names][c] = name + return +end + ### ### Nonlinear dual solutions ### diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl new file mode 100644 index 00000000000..c17e1b0bd22 --- /dev/null +++ b/src/nlp_expr.jl @@ -0,0 +1,280 @@ +# Copyright 2017, Iain Dunning, Joey Huchette, Miles Lubin, and contributors +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
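+
+# This file defines `NonlinearExpr`: a prototype expression-tree
+# representation of scalar nonlinear functions that deliberately mirrors
+# Julia's `Expr`, with a `head` naming the call and a `Vector{Any}` of
+# arguments.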
+ +struct NonlinearExpr <: AbstractJuMPScalar + head::Symbol + args::Vector{Any} +end + +# We include this method so that we can refactor the internal representation of +# NonlinearExpr without having to rewrite the method overloads. +function NonlinearExpr(head::Symbol, args...) + return NonlinearExpr(head, Any[args...]) +end + +Base.length(x::NonlinearExpr) = length(x.args) +Base.getindex(x::NonlinearExpr, i::Int) = x.args[i] + +function function_string(::MIME"text/plain", x::NonlinearExpr) + io, stack, is_open = IOBuffer(), Any[x], true + while !isempty(stack) + arg = pop!(stack) + if !is_open && arg != ')' + print(io, ", ") + end + if arg isa NonlinearExpr + print(io, arg.head, "(") + push!(stack, ')') + for i in length(arg):-1:1 + push!(stack, arg[i]) + end + else + print(io, arg) + end + is_open = arg isa NonlinearExpr + end + seekstart(io) + return read(io, String) +end + +function function_string(::MIME"text/latex", expr::NonlinearExpr) + return "\\textsf{$(function_string(MIME("text/plain"), expr))}" +end + +_isequal(x, y) = x == y +_isequal(x::T, y::T) where {T<:AbstractJuMPScalar} = isequal_canonical(x, y) + +function isequal_canonical(x::NonlinearExpr, y::NonlinearExpr) + return x.head == y.head && + length(x) == length(y) && + all(i -> _isequal(x[i], y[i]), 1:length(x)) +end + +function MOI.Nonlinear.parse_expression( + data::MOI.Nonlinear.Model, + expr::MOI.Nonlinear.Expression, + x::NonlinearExpr, + parent::Int, +) + stack = Tuple{Int,Any}[(parent, x)] + while !isempty(stack) + parent_node, arg = pop!(stack) + if arg isa NonlinearExpr + _parse_without_recursion_inner(stack, data, expr, arg, parent_node) + else + # We can use recursion here, because NonlinearExpr only occur in + # other NonlinearExpr. + MOI.Nonlinear.parse_expression(data, expr, arg, parent_node) + end + end + return +end + +function _parse_without_recursion_inner(stack, data, expr, x, parent) + id = get(data.operators.univariate_operator_to_id, x.head, nothing) + node_type = if length(x) == 1 && id !== nothing + MOI.Nonlinear.NODE_CALL_UNIVARIATE + else + id = get(data.operators.multivariate_operator_to_id, x.head, nothing) + @assert id !== nothing + MOI.Nonlinear.NODE_CALL_MULTIVARIATE + end + push!(expr.nodes, MOI.Nonlinear.Node(node_type, id, parent)) + parent = length(expr.nodes) + for i in length(x):-1:1 # Args need to be pushed onto the stack in reverse + push!(stack, (parent, x[i])) + end + return +end + +# Method definitions + +Base.zero(::Type{NonlinearExpr}) = NonlinearExpr(:+, 0.0) + +Base.one(::Type{NonlinearExpr}) = NonlinearExpr(:+, 1.0) + +# Univariate operators + +for f in MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS + op = Meta.quot(f) + if f == :+ + continue # We don't need this. + elseif f == :- + @eval Base.:-(x::NonlinearExpr) = NonlinearExpr(:-, x) + elseif isdefined(Base, f) + @eval Base.$(f)(x::AbstractJuMPScalar) = NonlinearExpr($op, x) + elseif isdefined(MOI.Nonlinear, :SpecialFunctions) + # The operator is defined in some other package. + SF = MOI.Nonlinear.SpecialFunctions + if isdefined(SF, f) + @eval $(SF).$(f)(x::AbstractJuMPScalar) = NonlinearExpr($op, x) + end + end +end + +# Multivariate operators + +# The multivariate operators in MOI are +, -, *, ^, /, ifelse, atan +# +# However, ifelse is a builtin, so we can't add methods to it. + +# We need only very generic fallbacks for these, because all other cases are +# caught with more specific methods. 
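+#
+# For example, for `f = :+` the loop below defines (roughly):
+#
+#     Base.:+(x::AbstractJuMPScalar, y::_Constant) = NonlinearExpr(:+, x, Float64(y))
+#
+# so `sin(x) + 1` returns `NonlinearExpr(:+, sin(x), 1.0)`, which prints as
+# `+(sin(x), 1.0)`. A bare `x + 1` still builds an `AffExpr` via the more
+# specific methods.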
+for f in (:+, :-, :*, :^, :/, :atan) + op = Meta.quot(f) + @eval begin + function Base.$(f)(x::AbstractJuMPScalar, y::_Constant) + rhs = convert(Float64, _constant_to_number(y)) + return NonlinearExpr($op, x, rhs) + end + function Base.$(f)(x::_Constant, y::AbstractJuMPScalar) + lhs = convert(Float64, _constant_to_number(x)) + return NonlinearExpr($op, lhs, y) + end + function Base.$(f)(x::AbstractJuMPScalar, y::AbstractJuMPScalar) + return NonlinearExpr($op, x, y) + end + end +end + +# JuMP interop + +function set_objective_function(model::Model, expr::NonlinearExpr) + nlp = nonlinear_model(model; force = true) + MOI.Nonlinear.set_objective(nlp, expr) + return +end + +function add_constraint( + model::Model, + con::ScalarConstraint{NonlinearExpr,S}, + name::String, +) where { + S<:Union{ + MOI.LessThan{Float64}, + MOI.GreaterThan{Float64}, + MOI.EqualTo{Float64}, + MOI.Interval{Float64}, + }, +} + nlp = nonlinear_model(model; force = true) + index = MOI.Nonlinear.add_constraint(nlp, con.func, con.set) + con_ref = ConstraintRef(model, index, ScalarShape()) + set_name(con_ref, name) + return con_ref +end + +function constraint_object(c::NonlinearConstraintRef) + nlp = nonlinear_model(c.model) + data = nlp.constraints[index(c)] + return ScalarConstraint(jump_function(c.model, data.expression), data.set) +end + +function jump_function(model::Model, expr::MOI.Nonlinear.Expression) + nlp = nonlinear_model(model) + parsed = Vector{Any}(undef, length(expr.nodes)) + adj = MOI.Nonlinear.adjacency_matrix(expr.nodes) + rowvals = SparseArrays.rowvals(adj) + for i in length(expr.nodes):-1:1 + node = expr.nodes[i] + parsed[i] = if node.type == MOI.Nonlinear.NODE_CALL_UNIVARIATE + NonlinearExpr( + nlp.operators.univariate_operators[node.index], + parsed[rowvals[SparseArrays.nzrange(adj, i)[1]]], + ) + elseif node.type == MOI.Nonlinear.NODE_CALL_MULTIVARIATE + NonlinearExpr( + nlp.operators.multivariate_operators[node.index], + Any[parsed[rowvals[j]] for j in SparseArrays.nzrange(adj, i)], + ) + elseif node.type == MOI.Nonlinear.NODE_MOI_VARIABLE + VariableRef(model, MOI.VariableIndex(node.index)) + elseif node.type == MOI.Nonlinear.NODE_PARAMETER + NonlinearParameter(model, node.index) + elseif node.type == MOI.Nonlinear.NODE_SUBEXPRESSION + NonlinearExpression(model, node.index) + elseif node.type == MOI.Nonlinear.NODE_VALUE + expr.values[node.index] + else + # node.type == MOI.Nonlinear.NODE_COMPARISON + # node.type == MOI.Nonlinear.NODE_LOGIC + error("Unsupported node") + end + end + return parsed[1] +end + +# MutableArithmetics.jl + +# These converts are used in the {add,sub}mul definition for AbstractJuMPScalar. 
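+#
+# For example (a sketch of the dispatch path; nothing new is defined here):
+# `@expression(model, x + 3 * sin(x))` is rewritten to
+# `_MA.add_mul(x, 3, sin(x))`, which promotes the result type to
+# `NonlinearExpr`, converts `x` with the methods below (a variable converts
+# to itself), and then falls back to binary `+`, producing
+# `+(x, *(3.0, sin(x)))`.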
+ +Base.convert(::Type{NonlinearExpr}, x::AbstractVariableRef) = x + +function Base.convert(::Type{NonlinearExpr}, x::GenericAffExpr) + args = Any[] + for (variable, coef) in x.terms + if isone(coef) + push!(args, variable) + elseif !iszero(coef) + push!(args, NonlinearExpr(:*, coef, variable)) + end + end + if !iszero(x.constant) || isempty(args) + push!(args, x.constant) + end + if length(args) == 1 + return args[1] + end + return NonlinearExpr(:+, args) +end + +function Base.convert(::Type{NonlinearExpr}, x::GenericQuadExpr) + args = Any[] + for (variable, coef) in x.aff.terms + if isone(coef) + push!(args, variable) + elseif !iszero(coef) + push!(args, NonlinearExpr(:*, coef, variable)) + end + end + for (pair, coef) in x.terms + if isone(coef) + push!(args, NonlinearExpr(:*, pair.a, pair.b)) + elseif !iszero(coef) + push!(args, NonlinearExpr(:*, coef, pair.a, pair.b)) + end + end + if !iszero(x.aff.constant) || isempty(args) + push!(args, x.aff.constant) + end + if length(args) == 1 + return args[1] + end + return NonlinearExpr(:+, args) +end + +function _MA.promote_operation( + ::Union{typeof(+),typeof(-),typeof(*)}, + ::Type{NonlinearExpr}, + ::Type{<:AbstractJuMPScalar}, +) + return NonlinearExpr +end + +function _MA.promote_operation( + ::Union{typeof(+),typeof(-),typeof(*)}, + ::Type{<:AbstractJuMPScalar}, + ::Type{NonlinearExpr}, +) + return NonlinearExpr +end + +function _MA.promote_operation( + ::Union{typeof(+),typeof(-),typeof(*)}, + ::Type{NonlinearExpr}, + ::Type{NonlinearExpr}, +) + return NonlinearExpr +end diff --git a/src/operators.jl b/src/operators.jl index 1d27e708e17..b90f61edaef 100644 --- a/src/operators.jl +++ b/src/operators.jl @@ -172,9 +172,6 @@ function Base.:*( end return result end -function Base.:/(lhs::AbstractVariableRef, rhs::GenericAffExpr) - return error("Cannot divide a variable by an affine expression") -end # AbstractVariableRef--GenericQuadExpr function Base.:+(v::AbstractVariableRef, q::GenericQuadExpr) return GenericQuadExpr(v + q.aff, copy(q.terms)) @@ -208,9 +205,10 @@ function Base.:^(lhs::AbstractVariableRef, rhs::Integer) return one(GenericQuadExpr{T,variable_ref_type(lhs)}) else error( - "Only exponents of 0, 1, or 2 are currently supported. Are you " * - "trying to build a nonlinear problem? Make sure you use " * - "@NLconstraint/@NLobjective.", + "Invalid integer exponent detected in expression `$lhs^$rhs`: " * + "supported exponents are 0, 1, or 2. " * + "If you are trying to build a nonlinear problem, use `x^$rhs.0` " * + "instead of `x^$rhs`, or use `x^Float64(y)` instead of `x^y`.", ) end end @@ -224,18 +222,15 @@ function Base.:^(lhs::GenericAffExpr{T}, rhs::Integer) where {T} return one(GenericQuadExpr{T,variable_ref_type(lhs)}) else error( - "Only exponents of 0, 1, or 2 are currently supported. Are you " * - "trying to build a nonlinear problem? Make sure you use " * - "@NLconstraint/@NLobjective.", + "Invalid integer exponent `$rhs` detected on an affine " * + "expression: supported exponents are 0, 1, or 2. " * + "If you are trying to build a nonlinear problem, use " * + "`aff^$rhs.0` instead of `aff^$rhs`, or use `aff^Float64(y)` " * + "instead of `aff^y`.", ) end end -function Base.:^(lhs::Union{AbstractVariableRef,GenericAffExpr}, rhs::_Constant) - return error( - "Only exponents of 0, 1, or 2 are currently supported. Are you trying to build a nonlinear problem? 
Make sure you use @NLconstraint/@NLobjective.", - ) -end # GenericAffExpr--AbstractVariableRef function Base.:+( lhs::GenericAffExpr{C,V}, @@ -265,9 +260,6 @@ function Base.:*( end return result end -function Base.:/(lhs::GenericAffExpr, rhs::AbstractVariableRef) - return error("Cannot divide affine expression by a variable") -end # AffExpr--AffExpr _copy_convert_coef(::Type{C}, aff::GenericAffExpr{C}) where {C} = copy(aff) @@ -374,12 +366,6 @@ end function Base.:-(q::GenericQuadExpr, v::AbstractVariableRef) return GenericQuadExpr(q.aff - v, copy(q.terms)) end -function Base.:*(q::GenericQuadExpr, v::AbstractVariableRef) - return error("Cannot multiply a quadratic expression by a variable") -end -function Base.:/(q::GenericQuadExpr, v::AbstractVariableRef) - return error("Cannot divide a quadratic expression by a variable") -end # GenericQuadExpr--GenericAffExpr function Base.:+(q::GenericQuadExpr, a::GenericAffExpr) return GenericQuadExpr(q.aff + a, copy(q.terms)) @@ -387,12 +373,6 @@ end function Base.:-(q::GenericQuadExpr, a::GenericAffExpr) return GenericQuadExpr(q.aff - a, copy(q.terms)) end -function Base.:*(q::GenericQuadExpr, a::GenericAffExpr) - return error("Cannot multiply a quadratic expression by an aff. expression") -end -function Base.:/(q::GenericQuadExpr, a::GenericAffExpr) - return error("Cannot divide a quadratic expression by an aff. expression") -end # GenericQuadExpr--GenericQuadExpr function Base.:+(q1::GenericQuadExpr{S}, q2::GenericQuadExpr{T}) where {S,T} result = _copy_convert_coef(_MA.promote_operation(+, S, T), q1) @@ -483,59 +463,3 @@ function LinearAlgebra.issymmetric(x::Matrix{T}) where {T<:_JuMPTypes} end return true end - -############################################################################### -# nonlinear function fallbacks for JuMP built-in types -############################################################################### - -const _OP_HINT = - "Are you trying to build a nonlinear problem? Make sure you use " * - "@NLconstraint/@NLobjective. If you are using an `@NL` macro and you " * - "encountered this error message, it is because you are attempting to use " * - "another unsupported function which calls this method internally." - -for f in MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS - if f in (:+, :-, :abs) || !isdefined(Base, f) - continue - end - for T in (:AbstractVariableRef, :GenericAffExpr, :GenericQuadExpr) - if f == :abs2 && (T == :AbstractVariableRef || T == :GenericAffExpr) - continue - end - error_string = "$f is not defined for type $T. $_OP_HINT" - @eval Base.$(f)(::$T) = error($error_string) - end -end - -function Base.:*( - ::T, - ::S, -) where { - T<:GenericQuadExpr, - S<:Union{AbstractVariableRef,GenericAffExpr,GenericQuadExpr}, -} - return error("*(::$T,::$S) is not defined. $_OP_HINT") -end -function Base.:*(lhs::GenericQuadExpr, rhs::GenericQuadExpr) - return error( - "*(::GenericQuadExpr,::GenericQuadExpr) is not defined. $_OP_HINT", - ) -end -function Base.:*( - ::S, - ::T, -) where { - T<:GenericQuadExpr, - S<:Union{AbstractVariableRef,GenericAffExpr,GenericQuadExpr}, -} - return error("*(::$S,::$T) is not defined. $_OP_HINT") -end -function Base.:/( - ::S, - ::T, -) where { - S<:Union{_Constant,AbstractVariableRef,GenericAffExpr,GenericQuadExpr}, - T<:Union{AbstractVariableRef,GenericAffExpr,GenericQuadExpr}, -} - return error("/(::$S,::$T) is not defined. 
$_OP_HINT") -end diff --git a/src/print.jl b/src/print.jl index ba03e5dccd7..10d21f70e19 100644 --- a/src/print.jl +++ b/src/print.jl @@ -866,6 +866,10 @@ end function Base.show(io::IO, c::NonlinearConstraintRef) index = MOI.Nonlinear.ConstraintIndex(c.index.value) str = nonlinear_constraint_string(c.model, MIME("text/plain"), index) + n = name(c) + if !isempty(n) + print(io, n, ": ") + end return print(io, str) end diff --git a/test/nlp_expr.jl b/test/nlp_expr.jl new file mode 100644 index 00000000000..f749bd1896d --- /dev/null +++ b/test/nlp_expr.jl @@ -0,0 +1,348 @@ +# Copyright 2017, Iain Dunning, Joey Huchette, Miles Lubin, and contributors +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + +module TestNLPExpr + +using JuMP +using Test + +function runtests() + for name in names(@__MODULE__; all = true) + if startswith("$name", "test_") + @testset "$(name)" begin + getfield(@__MODULE__, name)() + end + end + end + return +end + +function test_univariate_operators() + model = Model() + @variable(model, x) + for f in MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS + if f in (:+, :-, :abs2) + op = getfield(Base, f) + @test op(sin(x)) isa NonlinearExpr + elseif isdefined(Base, f) + op = getfield(Base, f) + @test op(x) isa NonlinearExpr + elseif isdefined(MOI.Nonlinear.SpecialFunctions, f) + op = getfield(MOI.Nonlinear.SpecialFunctions, f) + @test op(x) isa NonlinearExpr + end + end + return +end + +function test_binary_operators() + model = Model() + @variable(model, x) + num, aff, quad, nlp = 1.0, 1.0 + x, x^2, sin(x) + for op in (+, -, *, /), a in (num, x, aff, quad, nlp) + @test op(a, nlp) isa NonlinearExpr + @test op(nlp, a) isa NonlinearExpr + end + for op in (*, /), a in (x, aff) + @test op(a, quad) isa NonlinearExpr + @test op(quad, a) isa NonlinearExpr + end + for a in (num, x, aff, quad), b in (x, aff, quad) + @test /(a, b) isa NonlinearExpr + end + return +end + +function test_objective() + model = Model() + @variable(model, x) + @objective(model, Min, 2.0 * sin(x)^2 + cos(x) / x) + @test JuMP._nlp_objective_function(model) isa MOI.Nonlinear.Expression + return +end + +function test_expression() + model = Model() + @variable(model, x) + @variable(model, y[1:3]) + @test string(@expression(model, *(y...))) == "*(y[1]*y[2], y[3])" + @test string(@expression(model, sin(x))) == "sin(x)" + @test string(@expression(model, 2^x)) == "^(2.0, x)" + @test string(@expression(model, x^x)) == "^(x, x)" + @test string(@expression(model, sin(x)^2)) == "^(sin(x), 2.0)" + @test string(@expression(model, sin(x)^2.0)) == "^(sin(x), 2.0)" + @test string(@expression(model, 2 * sin(x)^2.0)) == "*(2.0, ^(sin(x), 2.0))" + @test string(@expression(model, 1 + sin(x))) == "+(1.0, sin(x))" + @test string(@expression(model, 1 + 2 * sin(x))) == "+(1.0, *(2.0, sin(x)))" + @test string(@expression(model, 2.0 * sin(x)^2 + cos(x) / x)) == + "+(*(2.0, ^(sin(x), 2.0)), /(cos(x), x))" + @test string(@expression(model, 2.0 * sin(x)^2 - cos(x) / x)) == + "-(*(2.0, ^(sin(x), 2.0)), /(cos(x), x))" + return +end + +function test_flatten_nary() + model = Model() + @variable(model, x) + @test string(zero(NonlinearExpr) + 1) == "+(+(0.0), 1.0)" + @test string(zero(NonlinearExpr) + x) == "+(+(0.0), x)" + @test string(sin(x) + sin(x) + 1) == "+(+(sin(x), sin(x)), 1.0)" + @test string(sin(x) + sin(x) + x) == "+(+(sin(x), sin(x)), x)" + @test string(sin(x) * sin(x) * 1) == 
"*(*(sin(x), sin(x)), 1.0)" + @test string(sin(x) * sin(x) * x) == "*(*(sin(x), sin(x)), x)" + return +end + +function test_zero_one() + @test string(zero(NonlinearExpr)) == "+(0.0)" + @test string(one(NonlinearExpr)) == "+(1.0)" + return +end + +function test_latex() + model = Model() + @variable(model, x) + @test function_string(MIME("text/latex"), sin(x)) == "\\textsf{sin(x)}" + @test function_string(MIME("text/plain"), sin(x)) == "sin(x)" + return +end + +function test_expression_addmul() + model = Model() + @variable(model, x) + @test string(@expression(model, x + 3 * sin(x))) == "+(x, *(3.0, sin(x)))" + @test string(@expression(model, 2 * x + 3 * sin(x))) == + "+(2 x, *(3.0, sin(x)))" + @test string(@expression(model, x^2 + 3 * sin(x))) == + "+($(x^2), *(3.0, sin(x)))" + @test string(@expression(model, sin(x) + 3 * sin(x))) == + "+(sin(x), *(3.0, sin(x)))" + @test string(@expression(model, sin(x) + 3 * x)) == "+(sin(x), 3 x)" + @test string(@expression(model, sin(x) + 3 * x * x)) == + "+(sin(x), 3 $(x^2))" + return +end + +function test_expression_submul() + model = Model() + @variable(model, x) + @test string(@expression(model, x - 3 * sin(x))) == "-(x, *(3.0, sin(x)))" + @test string(@expression(model, 2 * x - 3 * sin(x))) == + "-(2 x, *(3.0, sin(x)))" + @test string(@expression(model, x^2 - 3 * sin(x))) == + "-($(x^2), *(3.0, sin(x)))" + @test string(@expression(model, sin(x) - 3 * sin(x))) == + "-(sin(x), *(3.0, sin(x)))" + @test string(@expression(model, sin(x) - 3 * x)) == "-(sin(x), 3 x)" + @test string(@expression(model, sin(x) - 3 * x * x)) == + "-(sin(x), 3 $(x^2))" + return +end + +function test_aff_expr_convert() + model = Model() + @variable(model, x) + _to_string(x) = string(convert(NonlinearExpr, x)) + @test _to_string(AffExpr(0.0)) == "0.0" + @test _to_string(AffExpr(1.0)) == "1.0" + @test _to_string(x + 1) == "+(x, 1.0)" + @test _to_string(2x + 1) == "+(*(2.0, x), 1.0)" + @test _to_string(2x) == "*(2.0, x)" + return +end + +function test_quad_expr_convert() + model = Model() + @variable(model, x) + _to_string(x) = string(convert(NonlinearExpr, x)) + @test _to_string(QuadExpr(AffExpr(0.0))) == "0.0" + @test _to_string(QuadExpr(AffExpr(1.0))) == "1.0" + @test _to_string(x^2 + 1) == "+(*(x, x), 1.0)" + @test _to_string(2x^2 + 1) == "+(*(2.0, x, x), 1.0)" + @test _to_string(2x^2) == "*(2.0, x, x)" + @test _to_string(x^2 + x + 1) == "+(x, *(x, x), 1.0)" + @test _to_string(2x^2 + x + 1) == "+(x, *(2.0, x, x), 1.0)" + @test _to_string(2x^2 + x) == "+(x, *(2.0, x, x))" + @test _to_string(x^2 + 2x + 1) == "+(*(2.0, x), *(x, x), 1.0)" + @test _to_string(2x^2 + 2x + 1) == "+(*(2.0, x), *(2.0, x, x), 1.0)" + @test _to_string(2x^2 + 2x) == "+(*(2.0, x), *(2.0, x, x))" + return +end + +function test_constraint_name() + model = Model() + @variable(model, x) + @constraint(model, c, sin(x) <= 1) + @test name(c) == "c" + set_name(c, "d") + @test name(c) == "d" + @test startswith(string(c), "d: ") + return +end + +function test_constraint_lessthan() + model = Model() + @variable(model, x) + @constraint(model, c, 2.0 * sin(x)^2 + cos(x) / x <= 1) + nlp = nonlinear_model(model) + @test nlp[index(c)] isa MOI.Nonlinear.Constraint + @test nlp[index(c)].set == MOI.LessThan(0.0) + return +end + +function test_constraint_greaterthan() + model = Model() + @variable(model, x) + @constraint(model, c, 2.0 * sin(x)^2 + cos(x) / x >= 1) + nlp = nonlinear_model(model) + @test nlp[index(c)] isa MOI.Nonlinear.Constraint + @test nlp[index(c)].set == MOI.GreaterThan(0.0) + return +end + +function 
test_constraint_equalto() + model = Model() + @variable(model, x) + @constraint(model, c, 2.0 * sin(x)^2 + cos(x) / x == 1) + nlp = nonlinear_model(model) + @test nlp[index(c)] isa MOI.Nonlinear.Constraint + @test nlp[index(c)].set == MOI.EqualTo(0.0) + return +end + +function test_constraint_interval() + model = Model() + @variable(model, x) + @constraint(model, c, 0 <= 2.0 * sin(x)^2 + cos(x) / x <= 1) + nlp = nonlinear_model(model) + @test nlp[index(c)] isa MOI.Nonlinear.Constraint + @test nlp[index(c)].set == MOI.Interval(0.0, 1.0) + return +end + +function test_user_defined_function_overload() + model = Model() + @variable(model, x) + f(x::Real) = x^2 + f(x::AbstractJuMPScalar) = NonlinearExpr(:f, x) + register(model, :f, 1, f; autodiff = true) + @test string(@expression(model, f(x))) == "f(x)" + @test string(f(x) + f(x)) == "+(f(x), f(x))" + return +end + +function test_nonlinear_matrix_algebra() + model = Model() + @variable(model, X[1:3, 1:3], Symmetric) + @objective(model, Max, sum(X^4 .- X^3)) + nlp = nonlinear_model(model) + Y = [0.1 0.2 0.3; 0.2 0.4 0.5; 0.3 0.5 0.6] + data = Dict(index(X[i, j]) => Y[i, j] for i in 1:3 for j in 1:i) + obj = MOI.Nonlinear.evaluate(data, nlp, nlp.objective) + @test obj ≈ sum(Y^4 .- Y^3) + return +end + +""" +This test checks that we can work with expressions of arbitrary depth. Don't use +recursion! +""" +function test_recursion_stackoverflow() + model = Model() + @variable(model, x) + expr = sin(x) + for _ in 1:20_000 + expr = sin(expr) + end + @test @objective(model, Min, expr) isa NonlinearExpr + @test string(expr) isa String + return +end + +function test_nlparameter_interaction() + model = Model() + @variable(model, x) + @NLparameter(model, p == 1) + e = x + p + @test e isa NonlinearExpr + @test string(e) == "+(x, $p)" + return +end + +function test_nlexpression_interaction() + model = Model() + @variable(model, x) + @NLexpression(model, expr, sin(x)) + e = x + expr + @test e isa NonlinearExpr + @test string(e) == "+(x, $expr)" + return +end + +function test_nlobjective_with_nlexpr() + model = Model() + @variable(model, x) + y = sin(x) + @NLobjective(model, Min, y^2) + nlp = nonlinear_model(model) + @test isequal_canonical(jump_function(model, nlp.objective), sin(x)^2) + return +end + +function test_nlconstraint_with_nlexpr() + model = Model() + @variable(model, x) + y = sin(x) + @NLconstraint(model, c, y^2 <= 1) + nlp = nonlinear_model(model) + @test isequal_canonical( + jump_function(model, nlp.constraints[index(c)].expression), + sin(x)^2 - 1, + ) + return +end + +function test_jump_function_nonlinearexpr() + model = Model() + @variable(model, x) + @NLparameter(model, p == 1) + @NLexpression(model, expr1, sin(p + x)) + @NLexpression(model, expr2, sin(expr1)) + nlp = nonlinear_model(model) + @test string(jump_function(model, nlp[index(expr1)])) == "sin(+($p, $x))" + @test string(jump_function(model, nlp[index(expr2)])) == "sin($expr1)" + return +end + +function test_constraint_object() + model = Model() + @variable(model, x) + y = sin(x) + @NLconstraint(model, c, y^2 <= 1) + con = constraint_object(c) + @test isequal_canonical(con.func, sin(x)^2 - 1.0) + @test con.set == MOI.LessThan(0.0) + return +end + +function test_expr_mle() + data = [1.0, 2.0, 4.0, 8.0] + n = length(data) + model = Model() + @variable(model, x) + @variable(model, y) + obj = @expression( + model, + n / 2 * log(1 / (2 * y^2)) - + sum((data[i] - x)^2 for i in 1:n) / (2 * y^2) + ) + @test string(obj) == + "-(*(2.0, log(/(1.0, 2 $(y^2)))), /(4 $(x^2) - 30 x + 85, 2 
$(y^2)))" + return +end + +end + +TestNLPExpr.runtests() diff --git a/test/perf/nonlinear_expr.jl b/test/perf/nonlinear_expr.jl new file mode 100644 index 00000000000..4be70eba943 --- /dev/null +++ b/test/perf/nonlinear_expr.jl @@ -0,0 +1,440 @@ +module NonlinearBenchmark + +using JuMP +import BenchmarkTools +import InfiniteOpt +import Ipopt +import Random +import Symbolics + +function benchmark_group() + lookup = Dict( + "perf_nl_" => "@NL", + "perf_nlexpr_" => "NonlinearExpr", + "perf_infopt_" => "InfiniteOpt", + "perf_symbolics_" => "Symbolics", + ) + suite = BenchmarkTools.BenchmarkGroup() + for v in values(lookup) + suite[v] = BenchmarkTools.BenchmarkGroup() + end + for name in names(@__MODULE__; all = true) + f = getfield(@__MODULE__, name) + for (k, v) in lookup + if startswith("$name", k) + fname = replace("$name", k => "") + suite[v][fname] = BenchmarkTools.@benchmarkable $f() + break + end + end + end + return suite +end + +function runbenchmarks() + suite = benchmark_group() + return BenchmarkTools.run(suite) +end + +# sum +# +# nlexpr is slower because it builds up the product via operator overloading, +# creating a lot of temporary objects. @NL gets to see the full +(args...) to it +# builds the expression in-place. +# +# We could fix this by implementing a n-argy method for +, but that gets +# difficult with method ambiguities. + +function perf_nl_sum() + model = Model() + @variable(model, x) + @NLobjective(model, Min, sum(x^i for i in 1:10_000)) + return +end + +function perf_nlexpr_sum() + model = Model() + @variable(model, x) + @objective(model, Min, sum(x^Float64(i) for i in 1:10_000)) + return +end + +function perf_infopt_sum() + model = InfiniteOpt.InfiniteModel() + @variable(model, x) + @objective(model, Min, sum(x^i for i in 1:10_000)) + return +end + +function perf_symbolics_sum() + Symbolics.@variables x + sum(x^i for i in 1:10_000) + return +end + +# prod +# +# nlexpr is slower because it builds up the product via operator overloading, +# creating a lot of temporary objects. @NL gets to see the full *(args...) to it +# builds the expression in-place. +# +# We could fix this by implementing a n-argy method for *, but that gets +# difficult with method ambiguities. 
+ +function perf_nl_prod() + model = Model() + @variable(model, x) + @NLobjective(model, Min, prod(x^i for i in 1:10_000)) + return +end + +function perf_nlexpr_prod() + model = Model() + @variable(model, x) + @objective(model, Min, prod(x^Float64(i) for i in 1:10_000)) + return +end + +function perf_infopt_prod() + model = InfiniteOpt.InfiniteModel() + @variable(model, x) + @objective(model, Min, prod(x^i for i in 1:10_000)) + return +end + +function perf_symbolics_prod() + Symbolics.@variables x + prod(x^i for i in 1:10_000) + return +end + +# many_constraints + +function perf_nl_many_constraints() + model = Model() + @variable(model, x[1:10_000]) + @NLconstraint(model, [i = 1:10_000], sin(x[i]) <= cos(i)) + return +end + +function perf_nlexpr_many_constraints() + model = Model() + @variable(model, x[1:10_000]) + @constraint(model, [i = 1:10_000], sin(x[i]) <= cos(i)) + return +end + +function perf_infopt_many_constraints() + model = InfiniteOpt.InfiniteModel() + @variable(model, x[1:10_000]) + @constraint(model, [i = 1:10_000], sin(x[i]) <= cos(i)) + return +end + +function perf_symbolics_many_constraints() + Symbolics.@variables x[1:10_000] + [sin(x[i]) - cos(i) for i in 1:10_000] + return +end + +# mle + +function perf_nl_mle() + Random.seed!(1234) + n = 1_000 + data = randn(n) + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, μ, start = 0.0) + @variable(model, σ >= 0.0, start = 1.0) + @NLobjective( + model, + Max, + n / 2 * log(1 / (2 * π * σ^2)) - + sum((data[i] - μ)^2 for i in 1:n) / (2 * σ^2) + ) + optimize!(model) + return +end + +function perf_nlexpr_mle() + Random.seed!(1234) + n = 1_000 + data = randn(n) + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, μ, start = 0.0) + @variable(model, σ >= 0.0, start = 1.0) + @objective( + model, + Max, + n / 2 * log(1 / (2 * π * σ^2)) - + sum((data[i] - μ)^2 for i in 1:n) / (2 * σ^2) + ) + optimize!(model) + return +end + +function perf_infopt_mle() + Random.seed!(1234) + n = 1_000 + data = randn(n) + model = InfiniteOpt.InfiniteModel(Ipopt.Optimizer) + set_silent(model) + @variable(model, μ, start = 0.0) + @variable(model, σ >= 0.0, start = 1.0) + @objective( + model, + Max, + n / 2 * log(1 / (2 * π * σ^2)) - + sum((data[i] - μ)^2 for i in 1:n) / (2 * σ^2) + ) + optimize!(model) + return +end + +function perf_symbolics_mle() + Random.seed!(1234) + n = 1_000 + data = randn(n) + Symbolics.@variables μ σ + n / 2 * log(1 / (2 * π * σ^2)) - + sum((data[i] - μ)^2 for i in 1:n) / (2 * σ^2) + return +end + +# clnlbeam + +function perf_nl_clnlbeam() + N = 1000 + h = 1 / N + alpha = 350 + model = Model(Ipopt.Optimizer) + set_silent(model) + @variables(model, begin + -1 <= t[1:(N+1)] <= 1 + -0.05 <= x[1:(N+1)] <= 0.05 + u[1:(N+1)] + end) + @NLobjective( + model, + Min, + sum( + 0.5 * h * (u[i+1]^2 + u[i]^2) + + 0.5 * alpha * h * (cos(t[i+1]) + cos(t[i])) for i in 1:N + ), + ) + @NLconstraint( + model, + [i = 1:N], + x[i+1] - x[i] - 0.5 * h * (sin(t[i+1]) + sin(t[i])) == 0, + ) + @constraint( + model, + [i = 1:N], + t[i+1] - t[i] - 0.5 * h * u[i+1] - 0.5 * h * u[i] == 0, + ) + optimize!(model) + return +end + +function perf_nlexpr_clnlbeam() + N = 1000 + h = 1 / N + alpha = 350 + model = Model(Ipopt.Optimizer) + set_silent(model) + @variables(model, begin + -1 <= t[1:(N+1)] <= 1 + -0.05 <= x[1:(N+1)] <= 0.05 + u[1:(N+1)] + end) + @objective( + model, + Min, + sum( + 0.5 * h * (u[i+1]^2 + u[i]^2) + + 0.5 * alpha * h * (cos(t[i+1]) + cos(t[i])) for i in 1:N + ), + ) + @constraint( + model, + [i = 1:N], + 
x[i+1] - x[i] - 0.5 * h * (sin(t[i+1]) + sin(t[i])) == 0, + ) + @constraint( + model, + [i = 1:N], + t[i+1] - t[i] - 0.5 * h * u[i+1] - 0.5 * h * u[i] == 0, + ) + optimize!(model) + return +end + +function perf_infopt_clnlbeam() + N = 1000 + h = 1 / N + alpha = 350 + model = InfiniteOpt.InfiniteModel(Ipopt.Optimizer) + set_silent(model) + @variables(model, begin + -1 <= t[1:(N+1)] <= 1 + -0.05 <= x[1:(N+1)] <= 0.05 + u[1:(N+1)] + end) + @objective( + model, + Min, + sum( + 0.5 * h * (u[i+1]^2 + u[i]^2) + + 0.5 * alpha * h * (cos(t[i+1]) + cos(t[i])) for i in 1:N + ), + ) + @constraint( + model, + [i = 1:N], + x[i+1] - x[i] - 0.5 * h * (sin(t[i+1]) + sin(t[i])) == 0, + ) + @constraint( + model, + [i = 1:N], + t[i+1] - t[i] - 0.5 * h * u[i+1] - 0.5 * h * u[i] == 0, + ) + optimize!(model) + return +end + +# rosenbrock + +function perf_nl_rosenbrock() + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, x) + @variable(model, y) + @NLobjective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2) + optimize!(model) + return +end + +function perf_nlexpr_rosenbrock() + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, x) + @variable(model, y) + @objective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2) + optimize!(model) + return +end + +function perf_infopt_rosenbrock() + model = InfiniteOpt.InfiniteModel(Ipopt.Optimizer) + set_silent(model) + @variable(model, x) + @variable(model, y) + @objective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2) + optimize!(model) + return +end + +function perf_symbolics_rosenbrock() + Symbolics.@variables x y + (1 - x)^2 + 100 * (y - x^2)^2 + return +end + +# JuMP#2788 + +function perf_nl_jump_2788() + N = 400 + Random.seed!(1234) + k = N + n = 12 + p = rand(400:700, k, 1) + c1 = rand(100:200, k, n) + c2 = 0.9 .* c1 + b = rand(150:250, k, 1) + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, 0 <= x[i = 1:n] <= 1) + @variable(model, 0 <= var1 <= 1) + @variable(model, 0 <= var2 <= 1) + @variable(model, 0 <= var3 <= 1) + @objective(model, Max, var1 - var2 + var3) + @NLexpression(model, expr, sum(x[i] * p[i] for i in 1:n)) + @NLexpression(model, expr_c1[j = 1:k], sum(x[i] * c1[j, i] for i in 1:n)) + @NLexpression(model, expr_c2[j = 1:k], sum(x[i] * c2[j, i] for i in 1:n)) + @NLconstraint(model, expr == sum(b[j] / (1 + var1)^j for j in 1:k)) + @NLconstraint(model, expr == sum(expr_c1[j] / (1 + var2)^j for j in 1:k)) + @NLconstraint(model, expr == sum(expr_c2[j] / (1 + var3)^j for j in 1:k)) + @NLconstraint(model, [j = 1:k], expr_c1[j] >= b[j]) + optimize!(model) + return +end + +function perf_nlexpr_jump_2788() + N = 400 + Random.seed!(1234) + k = N + n = 12 + p = rand(400:700, k, 1) + c1 = rand(100:200, k, n) + c2 = 0.9 .* c1 + b = rand(150:250, k, 1) + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, 0 <= x[i = 1:n] <= 1) + @variable(model, 0 <= var1 <= 1) + @variable(model, 0 <= var2 <= 1) + @variable(model, 0 <= var3 <= 1) + @objective(model, Max, var1 - var2 + var3) + @expression(model, expr, sum(x[i] * p[i] for i in 1:n)) + @expression(model, expr_c1[j = 1:k], sum(x[i] * c1[j, i] for i in 1:n)) + @expression(model, expr_c2[j = 1:k], sum(x[i] * c2[j, i] for i in 1:n)) + @constraint(model, expr == sum(b[j] / (1 + var1)^Float64(j) for j in 1:k)) + @constraint( + model, + expr == sum(expr_c1[j] / (1 + var2)^Float64(j) for j in 1:k), + ) + @constraint( + model, + expr == sum(expr_c2[j] / (1 + var3)^Float64(j) for j in 1:k), + ) + @constraint(model, [j = 1:k], expr_c1[j] >= b[j]) + optimize!(model) + return 
+end + +function perf_infopt_jump_2788() + N = 400 + Random.seed!(1234) + k = N + n = 12 + p = rand(400:700, k, 1) + c1 = rand(100:200, k, n) + c2 = 0.9 .* c1 + b = rand(150:250, k, 1) + model = InfiniteOpt.InfiniteModel(Ipopt.Optimizer) + set_silent(model) + @variable(model, 0 <= x[i = 1:n] <= 1) + @variable(model, 0 <= var1 <= 1) + @variable(model, 0 <= var2 <= 1) + @variable(model, 0 <= var3 <= 1) + @objective(model, Max, var1 - var2 + var3) + @expression(model, expr, sum(x[i] * p[i] for i in 1:n)) + @expression(model, expr_c1[j = 1:k], sum(x[i] * c1[j, i] for i in 1:n)) + @expression(model, expr_c2[j = 1:k], sum(x[i] * c2[j, i] for i in 1:n)) + @constraint(model, expr == sum(b[j] / (1 + var1)^Float64(j) for j in 1:k)) + @constraint( + model, + expr == sum(expr_c1[j] / (1 + var2)^Float64(j) for j in 1:k), + ) + @constraint( + model, + expr == sum(expr_c2[j] / (1 + var3)^Float64(j) for j in 1:k), + ) + @constraint(model, [j = 1:k], expr_c1[j] >= b[j]) + optimize!(model) + return +end + +end # module diff --git a/test/perf/nonlinear_expr.py b/test/perf/nonlinear_expr.py new file mode 100644 index 00000000000..f60839949f9 --- /dev/null +++ b/test/perf/nonlinear_expr.py @@ -0,0 +1,143 @@ +import pyomo.environ as pyo +from pyomo.opt import SolverFactory +import math +import random +import time + +def time_reps(func, iteration_limit=100, time_limit=10): + start = time.time() + reps = 0 + for i in range(0, iteration_limit): + func() + reps += 1 + if time.time() - start > time_limit: + break + end = time.time() + avg_ms = (end - start) / reps * 1000 + print("%s => %.3f ms" % (func.__name__, avg_ms)) + return + +def perf_pyomo_sum(): + model = pyo.ConcreteModel() + model.x = pyo.Var() + model.obj = pyo.Objective(expr=sum(model.x**i for i in range(10000))) + return + +def perf_pyomo_prod(): + model = pyo.ConcreteModel() + model.x = pyo.Var() + model.obj = pyo.Objective(expr=math.prod(model.x**i for i in range(10000))) + return + +def perf_pyomo_many_constraints(): + model = pyo.ConcreteModel() + model.X = pyo.RangeSet(0, 10000) + model.x = pyo.Var(model.X) + def constraint(model, i): + return pyo.sin(model.x[i]) <= pyo.cos(i) + model.c = pyo.Constraint(model.X, rule=constraint) + return + +def perf_pyomo_mle(): + model = pyo.ConcreteModel() + n = 1000 + model.x = pyo.Var(initialize=0.0) + model.y = pyo.Var(within=pyo.NonNegativeReals, initialize=1.0) + data = [random.random() for _ in range(n)] + model.obj = pyo.Objective( + expr = n / 2 * pyo.log(1 / (2 * math.pi * model.y**2)) - + sum((data[i] - model.x)**2 for i in range(n)) / (2 * model.y**2), + sense = pyo.maximize, + ) + opt = SolverFactory("ipopt") + opt.solve(model, tee=False) + return + +def perf_pyomo_clnlbeam(): + N = 1000 + h = 1 / N + alpha = 350 + model = pyo.ConcreteModel() + model.S = pyo.RangeSet(1,N+1) + model.S2 = pyo.RangeSet(1,N) + model.t = pyo.Var(model.S, bounds=(-1.0, 1.0)) + model.x = pyo.Var(model.S, bounds=(-0.05, 0.05)) + model.u = pyo.Var(model.S) + model.obj = pyo.Objective( + expr = sum( + 0.5 * h * (model.u[i+1]**2 + model.u[i]**2) + + 0.5 * alpha * h * (pyo.cos(model.t[i+1]) + pyo.cos(model.t[i])) + for i in model.S2 + ) + ) + def con_1(model, i): + return model.x[i+1] - model.x[i] - 0.5 * h * (pyo.sin(model.t[i+1]) + pyo.sin(model.t[i])) == 0 + model.c1 = pyo.Constraint(model.S2, rule=con_1) + def con_2(model, i): + return model.t[i+1] - model.t[i] - 0.5 * h * model.u[i+1] - 0.5 * h * model.u[i] == 0 + model.c2 = pyo.Constraint(model.S2, rule=con_2) + opt = SolverFactory("ipopt") + opt.solve(model, tee=False) + 
return + +def perf_pyomo_rosenbrock(): + model = pyo.ConcreteModel() + model.x = pyo.Var() + model.y = pyo.Var() + model.obj = pyo.Objective( + expr = (1 - model.x)**2 + 100 * (model.y - model.x**2)**2 + ) + opt = SolverFactory("ipopt") + opt.solve(model, tee=False) + return + +def perf_pyomo_jump_2788(): + N = 400 + k = N + n = 12 + p = [random.randint(400, 700) for _ in range(k)] + c1 = [[random.randint(100, 200) for _ in range(k)] for _ in range(n)] + b = [random.randint(150, 250) for _ in range(k)] + model = pyo.ConcreteModel() + model.S = pyo.RangeSet(1, n) + model.K = pyo.RangeSet(1, k) + model.x = pyo.Var(model.S, bounds=(0, 1)) + model.var1 = pyo.Var(bounds=(0, 1)) + model.var2 = pyo.Var(bounds=(0, 1)) + model.var3 = pyo.Var(bounds=(0, 1)) + model.obj = pyo.Objective( + expr=model.var1 - model.var2 + model.var3, + sense=pyo.maximize, + ) + model.expr = sum(model.x[i] * p[i-1] for i in model.S) + def expr_c1(model, j): + return sum(model.x[i] * c1[i-1][j-1] for i in model.S) + def expr_c2(model, j): + return sum(model.x[i] * 0.9 * c1[i-1][j-1] for i in model.S) + model.con1 = pyo.Constraint( + expr=model.expr==sum(b[j-1]/(1+model.var1)**j for j in model.K), + ) + model.con2 = pyo.Constraint( + expr=model.expr==sum(expr_c1(model, j)/(1+model.var2)**j for j in model.K), + ) + model.con3 = pyo.Constraint( + expr=model.expr==sum(expr_c2(model, j)/(1+model.var3)**j for j in model.K) + ) + def con_4(model, j): + return expr_c1(model, j) >= b[j-1] + model.con4 = pyo.Constraint(model.K, rule=con_4) + opt = SolverFactory("ipopt") + opt.solve(model, tee=False) + return + +if __name__ == "__main__": + for f in [ + perf_pyomo_sum, + perf_pyomo_prod, + perf_pyomo_many_constraints, + perf_pyomo_mle, + perf_pyomo_clnlbeam, + perf_pyomo_rosenbrock, + perf_pyomo_jump_2788, + ]: + time_reps(f) diff --git a/test/test_operator.jl b/test/test_operator.jl index 075d8a5d8e8..dbc5a1d7946 100644 --- a/test/test_operator.jl +++ b/test/test_operator.jl @@ -106,10 +106,10 @@ function test_extension_broadcast_division_error( copy(B.rowval), vec(x), ) - @test_throws ErrorException A ./ x - @test_throws ErrorException B ./ x - @test_throws ErrorException A ./ y - @test_throws ErrorException B ./ y + @test A ./ x isa Matrix{NonlinearExpr} + @test B ./ x isa SparseArrays.SparseMatrixCSC{NonlinearExpr,Int} + @test A ./ y isa SparseArrays.SparseMatrixCSC{NonlinearExpr,Int} + @test B ./ y isa SparseArrays.SparseMatrixCSC{NonlinearExpr,Int} # TODO: Refactor to avoid calling the internal JuMP function # `_densify_with_jump_eltype`. 
#z = _densify_with_jump_eltype((2 .* y) ./ 3) @@ -335,17 +335,17 @@ function test_extension_basic_operators_number( @test_expression_with_string 4.13 + w "w + 4.13" @test_expression_with_string 3.16 - w "-w + 3.16" @test_expression_with_string 5.23 * w "5.23 w" - @test_throws ErrorException 2.94 / w + @test_expression_with_string 2.94 / w "/(2.94, w)" # 1-3 Number--AffExpr @test_expression_with_string 1.5 + aff "7.1 x + 4" @test_expression_with_string 1.5 - aff "-7.1 x - 1" @test_expression_with_string 2 * aff "14.2 x + 5" - @test_throws ErrorException 2 / aff + @test_expression_with_string 2 / aff "/(2.0, 7.1 x + 2.5)" # 1-4 Number--QuadExpr @test_expression_with_string 1.5 + q "2.5 y*z + 7.1 x + 4" @test_expression_with_string 1.5 - q "-2.5 y*z - 7.1 x - 1" @test_expression_with_string 2 * q "5 y*z + 14.2 x + 5" - @test_throws ErrorException 2 / q + @test_expression_with_string 2 / q "/(2.0, 2.5 y*z + 7.1 x + 2.5)" return end @@ -377,26 +377,26 @@ function test_extension_basic_operators_variable( @test_expression_with_string x^1 "x" @test_expression_with_string x^0 "1" @test_throws ErrorException x^3 - @test_throws ErrorException x^(T(15) / T(10)) + @test_expression_with_string x^(T(15) / T(10)) "^(x, 1.5)" # 2-2 Variable--Variable @test_expression_with_string w + x "w + x" @test_expression_with_string w - x "w - x" @test_expression_with_string w * x "w*x" @test_expression_with_string x - x "0" - @test_throws ErrorException w / x + @test_expression_with_string w / x "/(w, x)" @test_expression_with_string y * z - x "y*z - x" # 2-3 Variable--AffExpr @test_expression_with_string z + aff "z + 7.1 x + 2.5" @test_expression_with_string z - aff "z - 7.1 x - 2.5" @test_expression_with_string z * aff "7.1 z*x + 2.5 z" - @test_throws ErrorException z / aff + @test_expression_with_string z / aff "/(z, 7.1 x + 2.5)" @test_throws MethodError z ≤ aff @test_expression_with_string β * x - aff "0 x - 2.5" # 2-4 Variable--QuadExpr @test_expression_with_string w + q "2.5 y*z + w + 7.1 x + 2.5" @test_expression_with_string w - q "-2.5 y*z + w - 7.1 x - 2.5" - @test_throws ErrorException w * q - @test_throws ErrorException w / q + @test_expression_with_string w * q "*(w, 2.5 y*z + 7.1 x + 2.5)" + @test_expression_with_string w / q "/(w, 2.5 y*z + 7.1 x + 2.5)" @test transpose(x) === x @test conj(x) === x return @@ -435,26 +435,26 @@ function test_extension_basic_operators_affexpr( @test_expression_with_string (7.1 * x + 2.5)^0 "1" @test_throws ErrorException aff^3 @test_throws ErrorException (7.1 * x + 2.5)^3 - @test_throws ErrorException aff^1.5 - @test_throws ErrorException (7.1 * x + 2.5)^1.5 + @test_expression_with_string aff^1.5 "^(7.1 x + 2.5, 1.5)" + @test_expression_with_string (7.1 * x + 2.5)^1.5 "^(7.1 x + 2.5, 1.5)" # 3-2 AffExpr--Variable @test_expression_with_string aff + z "7.1 x + z + 2.5" @test_expression_with_string aff - z "7.1 x - z + 2.5" @test_expression_with_string aff * z "7.1 x*z + 2.5 z" - @test_throws ErrorException aff / z + @test_expression_with_string aff / z "/(7.1 x + 2.5, z)" @test_expression_with_string aff - 7.1 * x "0 x + 2.5" # 3-3 AffExpr--AffExpr @test_expression_with_string aff + aff2 "7.1 x + 1.2 y + 3.7" @test_expression_with_string aff - aff2 "7.1 x - 1.2 y + 1.3" @test_expression_with_string aff * aff2 "8.52 x*y + 3 y + 8.52 x + 3" @test string((x + x) * (x + 3)) == string((x + 3) * (x + x)) # Issue #288 - @test_throws ErrorException aff / aff2 + @test_expression_with_string aff / aff2 "/(7.1 x + 2.5, 1.2 y + 1.2)" @test_expression_with_string aff - aff "0 x" # 4-4 
AffExpr--QuadExpr @test_expression_with_string aff2 + q "2.5 y*z + 1.2 y + 7.1 x + 3.7" @test_expression_with_string aff2 - q "-2.5 y*z + 1.2 y - 7.1 x - 1.3" - @test_throws ErrorException aff2 * q - @test_throws ErrorException aff2 / q + @test_expression_with_string aff2 * q "*(1.2 y + 1.2, 2.5 y*z + 7.1 x + 2.5)" + @test_expression_with_string aff2 / q "/(1.2 y + 1.2, 2.5 y*z + 7.1 x + 2.5)" @test transpose(aff) === aff @test conj(aff) === aff return @@ -486,18 +486,18 @@ function test_extension_basic_operators_quadexpr( # 4-2 QuadExpr--Variable @test_expression_with_string q + w "2.5 y*z + 7.1 x + w + 2.5" @test_expression_with_string q - w "2.5 y*z + 7.1 x - w + 2.5" - @test_throws ErrorException q * w - @test_throws ErrorException q / w + @test_expression_with_string q * w "*(2.5 y*z + 7.1 x + 2.5, w)" + @test_expression_with_string q / w "/(2.5 y*z + 7.1 x + 2.5, w)" # 4-3 QuadExpr--AffExpr @test_expression_with_string q + aff2 "2.5 y*z + 7.1 x + 1.2 y + 3.7" @test_expression_with_string q - aff2 "2.5 y*z + 7.1 x - 1.2 y + 1.3" - @test_throws ErrorException q * aff2 - @test_throws ErrorException q / aff2 + @test_expression_with_string q * aff2 "*(2.5 y*z + 7.1 x + 2.5, 1.2 y + 1.2)" + @test_expression_with_string q / aff2 "/(2.5 y*z + 7.1 x + 2.5, 1.2 y + 1.2)" # 4-4 QuadExpr--QuadExpr @test_expression_with_string q + q2 "2.5 y*z + 8 x*z + 7.1 x + 1.2 y + 3.7" @test_expression_with_string q - q2 "2.5 y*z - 8 x*z + 7.1 x - 1.2 y + 1.3" - @test_throws ErrorException q * q2 - @test_throws ErrorException q / q2 + @test_expression_with_string q * q2 "*(2.5 y*z + 7.1 x + 2.5, 8 x*z + 1.2 y + 1.2)" + @test_expression_with_string q / q2 "/(2.5 y*z + 7.1 x + 2.5, 8 x*z + 1.2 y + 1.2)" @test transpose(q) === q @test conj(q) === q return From 0f3879cb208d158521d9101c882ac7b8746caf89 Mon Sep 17 00:00:00 2001 From: odow Date: Tue, 31 Jan 2023 16:51:57 +1300 Subject: [PATCH 03/23] Update to MOI.ScalarNonlinearFunction Add MOI branch --- .github/workflows/aqua.yml | 1 + Project.toml | 1 + src/JuMP.jl | 1 - src/nlp.jl | 11 ---- src/nlp_expr.jl | 74 ++++++++++++++++++-------- src/print.jl | 4 -- test/runtests.jl | 3 ++ test/{nlp_expr.jl => test_nlp_expr.jl} | 49 ++++++----------- 8 files changed, 74 insertions(+), 70 deletions(-) rename test/{nlp_expr.jl => test_nlp_expr.jl} (89%) diff --git a/.github/workflows/aqua.yml b/.github/workflows/aqua.yml index 8e14f591650..58049f78490 100644 --- a/.github/workflows/aqua.yml +++ b/.github/workflows/aqua.yml @@ -18,6 +18,7 @@ jobs: run: | using Pkg Pkg.add(PackageSpec(name="Aqua")) + Pkg.pkg"add MathOptInterface#od/nlp-expr" Pkg.develop(PackageSpec(path=pwd())) using JuMP, Aqua Aqua.test_all(JuMP; ambiguities = false) diff --git a/Project.toml b/Project.toml index 4e4fc6b1785..e3881c63af5 100644 --- a/Project.toml +++ b/Project.toml @@ -8,6 +8,7 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" MutableArithmetics = "d8a4904e-b15c-11e9-3269-09a3773c0cb0" OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" +Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" SnoopPrecompile = "66db9d55-30c0-4569-8b51-7e840670fc0c" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" diff --git a/src/JuMP.jl b/src/JuMP.jl index 88a930a8f80..ee41cb1a075 100644 --- a/src/JuMP.jl +++ b/src/JuMP.jl @@ -1080,7 +1080,6 @@ include("aff_expr.jl") include("quad_expr.jl") include("nlp.jl") include("nlp_expr.jl") - include("macros.jl") include("optimizer_interface.jl") diff 
--git a/src/nlp.jl b/src/nlp.jl index 205f5e86902..47af4693a7d 100644 --- a/src/nlp.jl +++ b/src/nlp.jl @@ -25,8 +25,6 @@ end function _init_NLP(model::GenericModel{Float64}) if model.nlp_model === nothing model.nlp_model = MOI.Nonlinear.Model() - # TODO(odow): move this into MOI - model.ext[:nlp_constraint_names] = Dict{NonlinearConstraintRef,String}() end return end @@ -553,15 +551,6 @@ function value(var_value::Function, c::NonlinearConstraintRef) ) end -function name(c::NonlinearConstraintRef) - return get(c.model.ext[:nlp_constraint_names], c, "") -end - -function set_name(c::NonlinearConstraintRef, name::String) - c.model.ext[:nlp_constraint_names][c] = name - return -end - ### ### Nonlinear dual solutions ### diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl index c17e1b0bd22..02062795f3f 100644 --- a/src/nlp_expr.jl +++ b/src/nlp_expr.jl @@ -141,29 +141,61 @@ end # JuMP interop -function set_objective_function(model::Model, expr::NonlinearExpr) - nlp = nonlinear_model(model; force = true) - MOI.Nonlinear.set_objective(nlp, expr) - return +# TODO +check_belongs_to_model(::NonlinearExpr, ::Model) = true + +function moi_function(f::NonlinearExpr) + ret = MOI.ScalarNonlinearFunction{Float64}(f.head, Any[]) + stack = Tuple{MOI.ScalarNonlinearFunction{Float64},Any}[] + for arg in reverse(f.args) + push!(stack, (ret, arg)) + end + while !isempty(stack) + parent, arg = pop!(stack) + if arg isa NonlinearExpr + new_ret = MOI.ScalarNonlinearFunction{Float64}(arg.head, Any[]) + push!(parent.args, new_ret) + for child in reverse(arg.args) + push!(stack, (new_ret, child)) + end + elseif arg isa Number + push!(parent.args, arg) + else + push!(parent.args, moi_function(arg)) + end + end + return ret +end + +function jump_function(model::Model, f::MOI.ScalarNonlinearFunction) + ret = NonlinearExpr(f.head, Any[]) + stack = Tuple{NonlinearExpr,Any}[] + for arg in reverse(f.args) + push!(stack, (ret, arg)) + end + while !isempty(stack) + parent, arg = pop!(stack) + if arg isa MOI.ScalarNonlinearFunction + new_ret = NonlinearExpr(arg.head, Any[]) + push!(parent.args, new_ret) + for child in reverse(arg.args) + push!(stack, (new_ret, child)) + end + elseif arg isa Number + push!(parent.args, arg) + else + push!(parent.args, jump_function(model, arg)) + end + end + return ret +end + +function jump_function_type(::Model, ::Type{<:MOI.ScalarNonlinearFunction}) + return NonlinearExpr end -function add_constraint( - model::Model, - con::ScalarConstraint{NonlinearExpr,S}, - name::String, -) where { - S<:Union{ - MOI.LessThan{Float64}, - MOI.GreaterThan{Float64}, - MOI.EqualTo{Float64}, - MOI.Interval{Float64}, - }, -} - nlp = nonlinear_model(model; force = true) - index = MOI.Nonlinear.add_constraint(nlp, con.func, con.set) - con_ref = ConstraintRef(model, index, ScalarShape()) - set_name(con_ref, name) - return con_ref +function moi_function_type(::Type{NonlinearExpr}) + return MOI.ScalarNonlinearFunction{Float64} end function constraint_object(c::NonlinearConstraintRef) diff --git a/src/print.jl b/src/print.jl index 10d21f70e19..ba03e5dccd7 100644 --- a/src/print.jl +++ b/src/print.jl @@ -866,10 +866,6 @@ end function Base.show(io::IO, c::NonlinearConstraintRef) index = MOI.Nonlinear.ConstraintIndex(c.index.value) str = nonlinear_constraint_string(c.model, MIME("text/plain"), index) - n = name(c) - if !isempty(n) - print(io, n, ": ") - end return print(io, str) end diff --git a/test/runtests.jl b/test/runtests.jl index 30f203a8108..d6876c9b1c6 100755 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -8,6 +8,9 
@@ # See https://github.com/jump-dev/JuMP.jl ############################################################################# +import Pkg +Pkg.pkg"add MathOptInterface#od/nlp-expr" + import JuMP import Test diff --git a/test/nlp_expr.jl b/test/test_nlp_expr.jl similarity index 89% rename from test/nlp_expr.jl rename to test/test_nlp_expr.jl index f749bd1896d..95a798fefab 100644 --- a/test/nlp_expr.jl +++ b/test/test_nlp_expr.jl @@ -8,17 +8,6 @@ module TestNLPExpr using JuMP using Test -function runtests() - for name in names(@__MODULE__; all = true) - if startswith("$name", "test_") - @testset "$(name)" begin - getfield(@__MODULE__, name)() - end - end - end - return -end - function test_univariate_operators() model = Model() @variable(model, x) @@ -59,7 +48,7 @@ function test_objective() model = Model() @variable(model, x) @objective(model, Min, 2.0 * sin(x)^2 + cos(x) / x) - @test JuMP._nlp_objective_function(model) isa MOI.Nonlinear.Expression + @test objective_function(model) isa NonlinearExpr return end @@ -178,7 +167,7 @@ function test_constraint_name() @test name(c) == "c" set_name(c, "d") @test name(c) == "d" - @test startswith(string(c), "d: ") + @test startswith(string(c), "d : ") return end @@ -186,9 +175,9 @@ function test_constraint_lessthan() model = Model() @variable(model, x) @constraint(model, c, 2.0 * sin(x)^2 + cos(x) / x <= 1) - nlp = nonlinear_model(model) - @test nlp[index(c)] isa MOI.Nonlinear.Constraint - @test nlp[index(c)].set == MOI.LessThan(0.0) + obj = constraint_object(c) + @test isequal_canonical(obj.func, 2.0 * sin(x)^2 + cos(x) / x - 1) + @test obj.set == MOI.LessThan(0.0) return end @@ -196,9 +185,9 @@ function test_constraint_greaterthan() model = Model() @variable(model, x) @constraint(model, c, 2.0 * sin(x)^2 + cos(x) / x >= 1) - nlp = nonlinear_model(model) - @test nlp[index(c)] isa MOI.Nonlinear.Constraint - @test nlp[index(c)].set == MOI.GreaterThan(0.0) + obj = constraint_object(c) + @test isequal_canonical(obj.func, 2.0 * sin(x)^2 + cos(x) / x - 1) + @test obj.set == MOI.GreaterThan(0.0) return end @@ -206,9 +195,9 @@ function test_constraint_equalto() model = Model() @variable(model, x) @constraint(model, c, 2.0 * sin(x)^2 + cos(x) / x == 1) - nlp = nonlinear_model(model) - @test nlp[index(c)] isa MOI.Nonlinear.Constraint - @test nlp[index(c)].set == MOI.EqualTo(0.0) + obj = constraint_object(c) + @test isequal_canonical(obj.func, 2.0 * sin(x)^2 + cos(x) / x - 1) + @test obj.set == MOI.EqualTo(0.0) return end @@ -216,9 +205,9 @@ function test_constraint_interval() model = Model() @variable(model, x) @constraint(model, c, 0 <= 2.0 * sin(x)^2 + cos(x) / x <= 1) - nlp = nonlinear_model(model) - @test nlp[index(c)] isa MOI.Nonlinear.Constraint - @test nlp[index(c)].set == MOI.Interval(0.0, 1.0) + obj = constraint_object(c) + @test isequal_canonical(obj.func, 2.0 * sin(x)^2 + cos(x) / x) + @test obj.set == MOI.Interval(0.0, 1.0) return end @@ -237,11 +226,7 @@ function test_nonlinear_matrix_algebra() model = Model() @variable(model, X[1:3, 1:3], Symmetric) @objective(model, Max, sum(X^4 .- X^3)) - nlp = nonlinear_model(model) - Y = [0.1 0.2 0.3; 0.2 0.4 0.5; 0.3 0.5 0.6] - data = Dict(index(X[i, j]) => Y[i, j] for i in 1:3 for j in 1:i) - obj = MOI.Nonlinear.evaluate(data, nlp, nlp.objective) - @test obj ≈ sum(Y^4 .- Y^3) + @test objective_function(model) isa NonlinearExpr return end @@ -343,6 +328,4 @@ function test_expr_mle() return end -end - -TestNLPExpr.runtests() +end # module From e9271200e24581d0ba9bf7dd4099110e34060e8f Mon Sep 17 00:00:00 2001 
From: odow Date: Wed, 1 Feb 2023 14:34:10 +1300 Subject: [PATCH 04/23] Add support for VectorNonlinearFunction Update to latest More docs Fix docs --- docs/src/manual/expressions.md | 68 +++++++++++++++++++++++++++++-- src/nlp_expr.jl | 73 +++++++++++++++++++++++++++++++--- 2 files changed, 131 insertions(+), 10 deletions(-) diff --git a/docs/src/manual/expressions.md b/docs/src/manual/expressions.md index 56023a8c084..d8d450372d7 100644 --- a/docs/src/manual/expressions.md +++ b/docs/src/manual/expressions.md @@ -326,10 +326,70 @@ julia> coefficient(ex, x) ## Nonlinear expressions -Nonlinear expressions can be constructed only using the [`@NLexpression`](@ref) -macro and can be used only in [`@NLobjective`](@ref), [`@NLconstraint`](@ref), -and other [`@NLexpression`](@ref)s. For more details, see the [Nonlinear -Modeling](@ref) section. +Nonlinear expressions in JuMP are represented by a [`NonlinearExpr`](@ref) +object. + +### Constructors + +Nonlinear expressions can be created using the [`NonlinearExpr`](@ref) +constructors: + +```jldoctest nonlinear_expressions +julia> model = Model(); + +julia> @variable(model, x); + +julia> expr = NonlinearExpr(:sin, Any[x]) +sin(x) +``` + +or via operator overloading: + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> expr = sin(x) +sin(x) +``` + +### Fields + +Each [`NonlinearExpr`](@ref) has two fields. + +The `.head` field is a `Symbol` that represents the operator being called: + +```jldoctest nonlinear_expressions +julia> expr.head +:sin +``` + +The `.args` field is a `Vector{Any}` containing the arguments to the operator: + +```jldoctest nonlinear_expressions +julia> expr.args +1-element Vector{Any}: + x +``` + +### Supported arguments + +Nonlinear expressions can contain a mix of numbers, [`AffExpr`](@ref), +[`QuadExpr`](@ref), and other [`NonlinearExpr`](@ref): + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> aff = x + 1; + +julia> quad = x^2 + x; + +julia> expr = cos(x) * sin(quad) + aff ++(*(cos(x), sin(x² + x)), x + 1) +``` ## Initializing arrays diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl index 02062795f3f..02e662673bf 100644 --- a/src/nlp_expr.jl +++ b/src/nlp_expr.jl @@ -3,6 +3,69 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. +""" + NonlinearExpr(head::Symbol, args::Vector{Any}) + NonlinearExpr(head::Symbol, args::Any...) + +The scalar-valued nonlinear function `head(args...)`, represented as a symbolic +expression tree, with the call operator `head` and ordered arguments in `args`. + +## `head` + +The `head::Symbol` must be an operator supported by the model. + +The default list of supported univariate operators is given by: + + * [`MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS`](@ref) + +and the default list of supported multivariate operators is given by: + + * [`MOI.Nonlinear.DEFAULT_MULTIVARIATE_OPERATORS`](@ref) + +Additional operators can be registered by setting a [`MOI.UserDefinedFunction`](@ref) +attribute. + +See the full list of operators supported by a [`MOI.ModelLike`](@ref) by +querying [`MOI.ListOfSupportedNonlinearOperators`](@ref). + +## `args` + +The vector `args` contains the arguments to the nonlinear function. If the +operator is univariate, it must contain one element. Otherwise, it may contain +multiple elements. 
+Each element must be one of the following:
+
+ * A constant value of type `T<:Number`
+ * A [`VariableRef`](@ref)
+ * An [`AffExpr`](@ref)
+ * A [`QuadExpr`](@ref)
+ * A [`NonlinearExpr`](@ref)
+
+## Unsupported operators
+
+If the optimizer does not support `head`, an [`MOI.UnsupportedNonlinearOperator`](@ref)
+error will be thrown.
+
+There is no guarantee about when this error will be thrown; it may be thrown
+when the function is first added to the model, or it may be thrown when
+[`optimize!`](@ref) is called.
+
+## Example
+
+To represent the function ``f(x) = \sin(x)^2``, do:
+
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x)
+x
+
+julia> f = sin(x)^2
+^(sin(x), 2.0)
+
+julia> f = NonlinearExpr(:^, NonlinearExpr(:sin, x), 2.0)
+^(sin(x), 2.0)
+```
+"""
 struct NonlinearExpr <: AbstractJuMPScalar
     head::Symbol
     args::Vector{Any}
@@ -145,15 +208,15 @@ end
 check_belongs_to_model(::NonlinearExpr, ::Model) = true
 
 function moi_function(f::NonlinearExpr)
-    ret = MOI.ScalarNonlinearFunction{Float64}(f.head, Any[])
-    stack = Tuple{MOI.ScalarNonlinearFunction{Float64},Any}[]
+    ret = MOI.ScalarNonlinearFunction(f.head, Any[])
+    stack = Tuple{MOI.ScalarNonlinearFunction,Any}[]
     for arg in reverse(f.args)
         push!(stack, (ret, arg))
     end
     while !isempty(stack)
         parent, arg = pop!(stack)
         if arg isa NonlinearExpr
-            new_ret = MOI.ScalarNonlinearFunction{Float64}(arg.head, Any[])
+            new_ret = MOI.ScalarNonlinearFunction(arg.head, Any[])
             push!(parent.args, new_ret)
             for child in reverse(arg.args)
                 push!(stack, (new_ret, child))
@@ -194,9 +257,7 @@ function jump_function_type(::Model, ::Type{<:MOI.ScalarNonlinearFunction})
     return NonlinearExpr
 end
 
-function moi_function_type(::Type{NonlinearExpr})
-    return MOI.ScalarNonlinearFunction{Float64}
-end
+moi_function_type(::Type{NonlinearExpr}) = MOI.ScalarNonlinearFunction
 
 function constraint_object(c::NonlinearConstraintRef)

From d352aa444085209e692469c28be4c1baacad8d27 Mon Sep 17 00:00:00 2001
From: odow
Date: Thu, 13 Apr 2023 14:48:47 +1200
Subject: [PATCH 05/23] Add register macro

Update format
Add @NL macro
Fix scoping
Update formatting
Update tests
Fix formatting
More docs
Update
Updates
Update
Update
Postwalk logic operators in expressions
Update
Update
Fix docs
Update
Support integer exponents
Update docs
add_user_defined_function
Update
Fix typo
Fix REPL blocks
Update docs
Update
Add variable as type parameter
Updates
Add extension tests
Fix formatting
Flatten associative operators
Add first pass at evaluating nonlinear functions
Change printing for binary infix operators
Update
Fix printing
Fix format
Re-enable PDF build
Fix latex printing
Fix
Updates MathOptInterface#master
Update package versions
Remove Pkg.pkg in runtests
Apply suggestions from code review
Flatten expressions at end, not during overloads
Remove benchmark files
Fix show
Make flatten walk entire expression
Update test_nlp_expr.jl
---
 .github/workflows/aqua.yml                     |   1 -
 Project.toml                                   |   3 +-
 docs/make.jl                                   |   5 +-
 docs/src/manual/expressions.md                 |  40 +-
 docs/src/manual/nlp.md                         | 849 ------------------
 docs/src/manual/nlp_expr.md                    | 351 ++++++++
 docs/src/manual/objective.md                   |  13 +
 docs/src/should_i_use.md                       |   2 +-
 .../tutorials/applications/power_systems.jl    |   6 +-
 .../tutorials/nonlinear/nested_problems.jl     |  14 +-
 .../tutorials/nonlinear/querying_hessians.jl   |   4 +-
 .../src/tutorials/nonlinear/rocket_control.jl  |  14 +-
 .../tutorials/nonlinear/simple_examples.jl     |  10 +-
 .../space_shuttle_reentry_trajectory.jl        |  58 +-
.../tutorials/nonlinear/tips_and_tricks.jl | 16 +- .../nonlinear/user_defined_hessians.jl | 8 +- src/JuMP.jl | 1 + src/aff_expr.jl | 7 + src/complement.jl | 2 +- src/macros.jl | 74 +- src/mutable_arithmetics.jl | 2 + src/nlp_expr.jl | 575 ++++++++++-- src/operators.jl | 15 +- src/quad_expr.jl | 11 + src/variables.jl | 104 ++- test/perf/nonlinear_expr.jl | 440 --------- test/perf/nonlinear_expr.py | 143 --- test/runtests.jl | 3 - test/test_macros.jl | 7 - test/test_nlp.jl | 48 + test/test_nlp_expr.jl | 475 ++++++++-- test/test_operator.jl | 91 +- test/test_variable.jl | 25 +- test/utilities.jl | 19 +- 34 files changed, 1632 insertions(+), 1804 deletions(-) delete mode 100644 docs/src/manual/nlp.md create mode 100644 docs/src/manual/nlp_expr.md delete mode 100644 test/perf/nonlinear_expr.jl delete mode 100644 test/perf/nonlinear_expr.py diff --git a/.github/workflows/aqua.yml b/.github/workflows/aqua.yml index 58049f78490..8e14f591650 100644 --- a/.github/workflows/aqua.yml +++ b/.github/workflows/aqua.yml @@ -18,7 +18,6 @@ jobs: run: | using Pkg Pkg.add(PackageSpec(name="Aqua")) - Pkg.pkg"add MathOptInterface#od/nlp-expr" Pkg.develop(PackageSpec(path=pwd())) using JuMP, Aqua Aqua.test_all(JuMP; ambiguities = false) diff --git a/Project.toml b/Project.toml index e3881c63af5..0a440675a7b 100644 --- a/Project.toml +++ b/Project.toml @@ -5,10 +5,10 @@ version = "1.13.0" [deps] LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" +MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" MutableArithmetics = "d8a4904e-b15c-11e9-3269-09a3773c0cb0" OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" -Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" SnoopPrecompile = "66db9d55-30c0-4569-8b51-7e840670fc0c" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" @@ -21,6 +21,7 @@ JuMPDimensionalDataExt = "DimensionalData" [compat] DimensionalData = "0.24" +MacroTools = "0.5" MathOptInterface = "1.18" MutableArithmetics = "1.1" OrderedCollections = "1" diff --git a/docs/make.jl b/docs/make.jl index 03c07a55410..81d5f75fed4 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -342,7 +342,7 @@ const _PAGES = [ "manual/objective.md", "manual/containers.md", "manual/solutions.md", - "manual/nlp.md", + "manual/nlp_expr.md", "manual/callbacks.md", "manual/complex.md", ], @@ -503,6 +503,9 @@ function _validate_pages() continue end filename = replace(joinpath(root, file), doc_src => "") + if filename == "manual/nlp.md" + continue + end if endswith(filename, ".md") && !(filename in set) push!(missing_files, filename) end diff --git a/docs/src/manual/expressions.md b/docs/src/manual/expressions.md index d8d450372d7..23637a7d8e1 100644 --- a/docs/src/manual/expressions.md +++ b/docs/src/manual/expressions.md @@ -388,7 +388,45 @@ julia> aff = x + 1; julia> quad = x^2 + x; julia> expr = cos(x) * sin(quad) + aff -+(*(cos(x), sin(x² + x)), x + 1) +((cos(x) * sin(x² + x)) + x + 1) +``` + +### Limitations + +Some nonlinear expressions cannot be created via operator overloading. For +example, to minimize the likelihood of bugs in user-code, we have not overloaded +comparisons such as `<` and `>=` between JuMP objects: + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> x < 1 +ERROR: Cannot evaluate `<` between a variable and a number. +[...] 
+``` + +Instead, wrap the expression in the [`@expression`](@ref) macro: +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> expr = @expression(model, x < 1) +(x < 1) +``` + +For technical reasons, other operators that are not overloaded include `||`, +`&&`, and `ifelse`. + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> expr = @expression(model, ifelse(x < -1 || x >= 1, x^2, 0.0)) +ifelse(((x < -1) || (x >= 1)), x², 0.0) ``` ## Initializing arrays diff --git a/docs/src/manual/nlp.md b/docs/src/manual/nlp.md deleted file mode 100644 index 1eefe60c50d..00000000000 --- a/docs/src/manual/nlp.md +++ /dev/null @@ -1,849 +0,0 @@ -```@meta -CurrentModule = JuMP -DocTestSetup = quote - using JuMP -end -DocTestFilters = [r"≤|<=", r"≥|>=", r" == | = ", r" ∈ | in ", r"MathOptInterface|MOI"] -``` - -# Nonlinear Modeling - -JuMP has support for general smooth nonlinear (convex and nonconvex) -optimization problems. JuMP is able to provide exact, sparse second-order -derivatives to solvers. This information can improve solver accuracy and -performance. - -There are three main changes to solve nonlinear programs in JuMP. - * Use [`@NLobjective`](@ref) instead of [`@objective`](@ref) - * Use [`@NLconstraint`](@ref) instead of [`@constraint`](@ref) - * Use [`@NLexpression`](@ref) instead of [`@expression`](@ref) - -!!! info - There are some restrictions on what syntax you can use in the `@NLxxx` - macros. Make sure to read the [Syntax notes](@ref). - -## Set a nonlinear objective - -Use [`@NLobjective`](@ref) to set a nonlinear objective. - -```jldoctest -julia> model = Model(); - -julia> @variable(model, x[1:2]); - -julia> @NLobjective(model, Min, exp(x[1]) - sqrt(x[2])) -``` -To modify a nonlinear objective, call [`@NLobjective`](@ref) again. - -## Add a nonlinear constraint - -Use [`@NLconstraint`](@ref) to add a nonlinear constraint. - -```jldoctest nonlinear_constraint -julia> model = Model(); - -julia> @variable(model, x[1:2]); - -julia> @NLconstraint(model, exp(x[1]) <= 1) -exp(x[1]) - 1.0 ≤ 0 - -julia> @NLconstraint(model, [i = 1:2], x[i]^i >= i) -2-element Vector{NonlinearConstraintRef{ScalarShape}}: - x[1] ^ 1.0 - 1.0 ≥ 0 - x[2] ^ 2.0 - 2.0 ≥ 0 - -julia> @NLconstraint(model, con[i = 1:2], prod(x[j] for j = 1:i) == i) -2-element Vector{NonlinearConstraintRef{ScalarShape}}: - (*)(x[1]) - 1.0 = 0 - x[1] * x[2] - 2.0 = 0 -``` - -!!! info - You can only create nonlinear constraints with `<=`, `>=`, and `==`. - More general `Nonlinear`-in-`Set` constraints are not supported. - -Delete a nonlinear constraint using [`delete`](@ref): -```jldoctest nonlinear_constraint -julia> delete(model, con[1]) -``` - -## Create a nonlinear expression - -Use [`@NLexpression`](@ref) to create nonlinear expression objects. The syntax -is identical to [`@expression`](@ref), except that the expression can contain -nonlinear terms. 
- -```jldoctest nl_expression -julia> model = Model(); - -julia> @variable(model, x[1:2]); - -julia> expr = @NLexpression(model, exp(x[1]) + sqrt(x[2])) -subexpression[1]: exp(x[1]) + sqrt(x[2]) - -julia> my_anon_expr = @NLexpression(model, [i = 1:2], sin(x[i])) -2-element Vector{NonlinearExpression}: - subexpression[2]: sin(x[1]) - subexpression[3]: sin(x[2]) - -julia> @NLexpression(model, my_expr[i = 1:2], sin(x[i])) -2-element Vector{NonlinearExpression}: - subexpression[4]: sin(x[1]) - subexpression[5]: sin(x[2]) -``` - -Nonlinear expression can be used in [`@NLobjective`](@ref), [`@NLconstraint`](@ref), -and even nested in other [`@NLexpression`](@ref)s. - -```jldoctest nl_expression -julia> @NLobjective(model, Min, expr^2 + 1) - -julia> @NLconstraint(model, [i = 1:2], my_expr[i] <= i) -2-element Vector{NonlinearConstraintRef{ScalarShape}}: - subexpression[4] - 1.0 ≤ 0 - subexpression[5] - 2.0 ≤ 0 - -julia> @NLexpression(model, nested[i = 1:2], sin(my_expr[i])) -2-element Vector{NonlinearExpression}: - subexpression[6]: sin(subexpression[4]) - subexpression[7]: sin(subexpression[5]) -``` - -Use [`value`](@ref) to query the value of a nonlinear expression: -```jldoctest nl_expression -julia> set_start_value(x[1], 1.0) - -julia> value(start_value, nested[1]) -0.7456241416655579 - -julia> sin(sin(1.0)) -0.7456241416655579 -``` - -## Create a nonlinear parameter - -For nonlinear models only, JuMP offers a syntax for explicit "parameter" objects, -which are constants in the model that can be efficiently updated between solves. - -Nonlinear parameters are declared by using the [`@NLparameter`](@ref) macro -and may be indexed by arbitrary sets analogously to JuMP variables and -expressions. - -The initial value of the parameter must be provided on the right-hand side of -the `==` sign. - -```jldoctest nonlinear_parameters -julia> model = Model(); - -julia> @variable(model, x); - -julia> @NLparameter(model, p[i = 1:2] == i) -2-element Vector{NonlinearParameter}: - parameter[1] == 1.0 - parameter[2] == 2.0 -``` - -Create anonymous parameters using the `value` keyword: -```jldoctest nonlinear_parameters -julia> anon_parameter = @NLparameter(model, value = 1) -parameter[3] == 1.0 -``` - -!!! info - A parameter is not an optimization variable. It must be fixed to a value with - `==`. If you want a parameter that is `<=` or `>=`, create a variable instead - using [`@variable`](@ref). - -Use [`value`](@ref) and [`set_value`](@ref) to query or update the value of a -parameter. - -```jldoctest nonlinear_parameters -julia> value.(p) -2-element Vector{Float64}: - 1.0 - 2.0 - -julia> set_value(p[2], 3.0) -3.0 - -julia> value.(p) -2-element Vector{Float64}: - 1.0 - 3.0 -``` - -### When to use a parameter - -Nonlinear parameters are useful when solving nonlinear models in a sequence: - -```@example -using JuMP, Ipopt -model = Model(Ipopt.Optimizer) -set_silent(model) -@variable(model, z) -@NLparameter(model, x == 1.0) -@NLobjective(model, Min, (z - x)^2) -optimize!(model) -@show value(z) # Equals 1.0. - -# Now, update the value of x to solve a different problem. -set_value(x, 5.0) -optimize!(model) -@show value(z) # Equals 5.0 -nothing #hide -``` - -!!! info - Using nonlinear parameters can be faster than creating a new model from - scratch with updated data because JuMP is able to avoid repeating a number - of steps in processing the model before handing it off to the solver. - -## Syntax notes - -The syntax accepted in nonlinear macros is more restricted than the syntax -for linear and quadratic macros. 
We note some important points below. - -### Scalar operations only - -Except for the splatting syntax discussed below, all expressions -must be simple scalar operations. You cannot use `dot`, matrix-vector products, -vector slices, etc. -```jldoctest nlp_scalar_only -julia> model = Model(); - -julia> @variable(model, x[1:2]); - -julia> @variable(model, y); - -julia> c = [1, 2]; - -julia> @NLobjective(model, Min, c' * x + 3y) -ERROR: Unexpected array [1 2] in nonlinear expression. Nonlinear expressions may contain only scalar expressions. -[...] -``` - -Translate vector operations into explicit `sum()` operations: -```jldoctest nlp_scalar_only -julia> @NLobjective(model, Min, sum(c[i] * x[i] for i = 1:2) + 3y) -``` - -Or use an [`@expression`](@ref): -```jldoctest nlp_scalar_only -julia> @expression(model, expr, c' * x) -x[1] + 2 x[2] - -julia> @NLobjective(model, Min, expr + 3y) - -``` - -### Splatting - -The [splatting operator](https://docs.julialang.org/en/v1/manual/faq/#...-splits-one-argument-into-many-different-arguments-in-function-calls-1) - `...` is recognized in a very restricted setting for expanding function - arguments. The expression splatted can be *only* a symbol. More complex - expressions are not recognized. - -```jldoctest; filter=r"≤|<=" -julia> model = Model(); - -julia> @variable(model, x[1:3]); - -julia> @NLconstraint(model, *(x...) <= 1.0) -x[1] * x[2] * x[3] - 1.0 ≤ 0 - -julia> @NLconstraint(model, *((x / 2)...) <= 0.0) -ERROR: Unsupported use of the splatting operator. JuMP supports splatting only symbols. For example, `x...` is ok, but `(x + 1)...`, `[x; y]...` and `g(f(y)...)` are not. -``` - -## User-defined Functions - -JuMP natively supports the set of univariate and multivariate functions recognized by the -`MOI.Nonlinear` submodule. In addition to this list of functions, it is possible -to register custom *user-defined* nonlinear functions. User-defined functions -can be used anywhere in [`@NLobjective`](@ref), [`@NLconstraint`](@ref), and -[`@NLexpression`](@ref). - -JuMP will attempt to automatically register functions it detects in your -nonlinear expressions, which usually means manually registering a function is -not needed. Two exceptions are if you want to provide custom derivatives, or if -the function is not available in the scope of the nonlinear expression. - -!!! warning - User-defined functions must return a scalar output. For a work-around, see - [User-defined functions with vector outputs](@ref). - -### Automatic differentiation - -JuMP does not support black-box optimization, so all user-defined functions must -provide derivatives in some form. Fortunately, JuMP supports **automatic -differentiation of user-defined functions**, a feature to our knowledge not -available in any comparable modeling systems. - -!!! info - Automatic differentiation is *not* finite differencing. JuMP's automatically - computed derivatives are not subject to approximation error. - -JuMP uses [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl) to -perform automatic differentiation; see the ForwardDiff.jl -[documentation](https://www.juliadiff.org/ForwardDiff.jl/v0.10.2/user/limitations.html) -for a description of how to write a function suitable for automatic -differentiation. - -#### Common mistakes when writing a user-defined function - -!!! warning - Get an error like `No method matching Float64(::ForwardDiff.Dual)`? 
Read - this section, and see the guidelines at [ForwardDiff.jl](https://www.juliadiff.org/ForwardDiff.jl/release-0.10/user/limitations.html). - -The most common error is that your user-defined function is not generic with -respect to the number type, that is, don't assume that the input to the function -is `Float64`. -```julia -f(x::Float64) = 2 * x # This will not work. -f(x::Real) = 2 * x # This is good. -f(x) = 2 * x # This is also good. -``` - -Another reason you may encounter this error is if you create arrays inside -your function which are `Float64`. -```julia -function bad_f(x...) - y = zeros(length(x)) # This constructs an array of `Float64`! - for i = 1:length(x) - y[i] = x[i]^i - end - return sum(y) -end - -function good_f(x::T...) where {T<:Real} - y = zeros(T, length(x)) # Construct an array of type `T` instead! - for i = 1:length(x) - y[i] = x[i]^i - end - return sum(y) -end -``` - -### Register a function - -To register a user-defined function with derivatives computed by -automatic differentiation, use the [`register`](@ref) method as in the following -example: - -```@example -using JuMP #hide -square(x) = x^2 -f(x, y) = (x - 1)^2 + (y - 2)^2 - -model = Model() - -register(model, :square, 1, square; autodiff = true) -register(model, :my_f, 2, f; autodiff = true) - -@variable(model, x[1:2] >= 0.5) -@NLobjective(model, Min, my_f(x[1], square(x[2]))) -``` - -The above code creates a JuMP model with the objective function -`(x[1] - 1)^2 + (x[2]^2 - 2)^2`. The arguments to [`register`](@ref) are: - 1. The model for which the functions are registered. - 2. A Julia symbol object which serves as the name of the user-defined function - in JuMP expressions. - 3. The number of input arguments that the function takes. - 4. The Julia method which computes the function - 5. A flag to instruct JuMP to compute exact gradients automatically. - -!!! tip - The symbol `:my_f` doesn't have to match the name of the function `f`. - However, it's more readable if it does. Make sure you use `my_f` - and not `f` in the macros. - -!!! warning - User-defined functions cannot be re-registered and will not update if you - modify the underlying Julia function. If you want to change a user-defined - function between solves, rebuild the model or use a different name. To use - a different name programmatically, see [Raw expression input](@ref). - -### Register a function and gradient - -Forward-mode automatic differentiation as implemented by ForwardDiff.jl has a -computational cost that scales linearly with the number of input dimensions. As -such, it is not the most efficient way to compute gradients of user-defined -functions if the number of input arguments is large. In this case, users may -want to provide their own routines for evaluating gradients. - -#### Univariate functions - -For univariate functions, the gradient function `∇f` returns a number that -represents the first-order derivative: -```@example -using JuMP #hide -f(x) = x^2 -∇f(x) = 2x -model = Model() -register(model, :my_square, 1, f, ∇f; autodiff = true) -@variable(model, x >= 0) -@NLobjective(model, Min, my_square(x)) -``` -If `autodiff = true`, JuMP will use automatic differentiation to compute the -hessian. 
- -#### Multivariate functions - -For multivariate functions, the gradient function `∇f` must take a gradient -vector as the first argument that is filled in-place: -```@example -using JuMP #hide -f(x, y) = (x - 1)^2 + (y - 2)^2 -function ∇f(g::AbstractVector{T}, x::T, y::T) where {T} - g[1] = 2 * (x - 1) - g[2] = 2 * (y - 2) - return -end - -model = Model() -register(model, :my_square, 2, f, ∇f) -@variable(model, x[1:2] >= 0) -@NLobjective(model, Min, my_square(x[1], x[2])) -``` - -!!! warning - Make sure the first argument to `∇f` supports an `AbstractVector`, and do - not assume the input is `Float64`. - -### Register a function, gradient, and hessian - -You can also register a function with the second-order derivative information, -which is a scalar for univariate functions, and a symmetric matrix for -multivariate functions. - -#### Univariate functions - -Pass a function which returns a number representing the second-order derivative: -```@example -using JuMP #hide -f(x) = x^2 -∇f(x) = 2x -∇²f(x) = 2 -model = Model() -register(model, :my_square, 1, f, ∇f, ∇²f) -@variable(model, x >= 0) -@NLobjective(model, Min, my_square(x)) -``` - -#### Multivariate functions - -For multivariate functions, the hessian function `∇²f` must take an -`AbstractMatrix` as the first argument, the lower-triangular of which is filled -in-place: -```@example -using JuMP #hide -f(x...) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2 -function ∇f(g, x...) - g[1] = 400 * x[1]^3 - 400 * x[1] * x[2] + 2 * x[1] - 2 - g[2] = 200 * (x[2] - x[1]^2) - return -end -function ∇²f(H, x...) - H[1, 1] = 1200 * x[1]^2 - 400 * x[2] + 2 - # H[1, 2] = -400 * x[1] <-- Not needed. Fill the lower-triangular only. - H[2, 1] = -400 * x[1] - H[2, 2] = 200.0 - return -end - -model = Model() -register(model, :rosenbrock, 2, f, ∇f, ∇²f) -@variable(model, x[1:2]) -@NLobjective(model, Min, rosenbrock(x[1], x[2])) -``` - -!!! warning - You may assume the Hessian matrix `H` is initialized with zeros, and because - `H` is symmetric, you need only to fill in the non-zero of the - lower-triangular terms. The matrix type passed in as `H` depends on the - automatic differentiation system, so make sure the first argument to the - Hessian function supports an `AbstractMatrix` (it may be something other - than `Matrix{Float64}`). However, you may assume only that `H` supports - `size(H)` and `setindex!`. Finally, the matrix is treated as dense, so the - performance will be poor on functions with high-dimensional input. - -### User-defined functions with vector inputs - -User-defined functions which take vectors as input arguments (for example, -`f(x::Vector)`) are *not* supported. Instead, use Julia's splatting syntax to -create a function with scalar arguments. For example, instead of -```julia -f(x::Vector) = sum(x[i]^i for i in 1:length(x)) -``` -define: -```julia -f(x...) = sum(x[i]^i for i in 1:length(x)) -``` - -This function `f` can be used in a JuMP model as follows: -```@example -using JuMP #hide -model = Model() -@variable(model, x[1:5] >= 0) -f(x...) = sum(x[i]^i for i in 1:length(x)) -register(model, :f, 5, f; autodiff = true) -@NLobjective(model, Min, f(x...)) -``` - -!!! tip - Make sure to read the syntax restrictions of [Splatting](@ref). - -## Factors affecting solution time - -The execution time when solving a nonlinear programming problem can be divided -into two parts, the time spent in the optimization algorithm (the solver) and -the time spent evaluating the nonlinear functions and corresponding derivatives. 
-Ipopt explicitly displays these two timings in its output, for example: - -``` -Total CPU secs in IPOPT (w/o function evaluations) = 7.412 -Total CPU secs in NLP function evaluations = 2.083 -``` - -For Ipopt in particular, one can improve the performance by installing advanced -sparse linear algebra packages, see [Installation Guide](@ref). For other -solvers, see their respective documentation for performance tips. - -The function evaluation time, on the other hand, is the responsibility of the -modeling language. JuMP computes derivatives by using reverse-mode automatic -differentiation with graph coloring methods for exploiting sparsity of the -Hessian matrix. As a conservative bound, JuMP's performance here currently -may be expected to be within a factor of 5 of AMPL's. Our [paper in -SIAM Review](https://mlubin.github.io/pdf/jump-sirev.pdf) has more details. - -## Querying derivatives from a JuMP model - -For some advanced use cases, one may want to directly query the derivatives of a -JuMP model instead of handing the problem off to a solver. -Internally, JuMP implements the [`MOI.AbstractNLPEvaluator`](@ref) interface. To -obtain an NLP evaluator object from a JuMP model, use [`NLPEvaluator`](@ref). -[`index`](@ref) returns the [`MOI.VariableIndex`](@ref) corresponding to a JuMP -variable. `MOI.VariableIndex` itself is a type-safe wrapper for `Int64` (stored -in the `.value` field.) - -For example: - -```jldoctest derivatives -julia> raw_index(v::MOI.VariableIndex) = v.value -raw_index (generic function with 1 method) - -julia> model = Model(); - -julia> @variable(model, x) -x - -julia> @variable(model, y) -y - -julia> @NLobjective(model, Min, sin(x) + sin(y)) - -julia> values = zeros(2) -2-element Vector{Float64}: - 0.0 - 0.0 - -julia> x_index = raw_index(JuMP.index(x)) -1 - -julia> y_index = raw_index(JuMP.index(y)) -2 - -julia> values[x_index] = 2.0 -2.0 - -julia> values[y_index] = 3.0 -3.0 - -julia> d = NLPEvaluator(model) -Nonlinear.Evaluator with available features: - * :Grad - * :Jac - * :JacVec - * :Hess - * :HessVec - * :ExprGraph - -julia> MOI.initialize(d, [:Grad]) - -julia> MOI.eval_objective(d, values) -1.0504174348855488 - -julia> sin(2.0) + sin(3.0) -1.0504174348855488 - -julia> ∇f = zeros(2) -2-element Vector{Float64}: - 0.0 - 0.0 - -julia> MOI.eval_objective_gradient(d, ∇f, values) - -julia> ∇f[x_index], ∇f[y_index] -(-0.4161468365471424, -0.9899924966004454) - -julia> cos(2.0), cos(3.0) -(-0.4161468365471424, -0.9899924966004454) -``` - -Only nonlinear constraints (those added with [`@NLconstraint`](@ref)), and -nonlinear objectives (added with [`@NLobjective`](@ref)) exist in the scope of -the [`NLPEvaluator`](@ref). - -The [`NLPEvaluator`](@ref) *does not evaluate derivatives of linear or quadratic -constraints or objectives*. - -The [`index`](@ref) method applied to a nonlinear constraint reference object -returns its index as a [`MOI.Nonlinear.ConstraintIndex`](@ref). 
For example: - -```jldoctest -julia> model = Model(); - -julia> @variable(model, x); - -julia> @NLconstraint(model, cons1, sin(x) <= 1); - -julia> @NLconstraint(model, cons2, x + 5 == 10); - -julia> typeof(cons1) -NonlinearConstraintRef{ScalarShape} (alias for ConstraintRef{GenericModel{Float64}, MathOptInterface.Nonlinear.ConstraintIndex, ScalarShape}) - -julia> index(cons1) -MathOptInterface.Nonlinear.ConstraintIndex(1) - -julia> index(cons2) -MathOptInterface.Nonlinear.ConstraintIndex(2) -``` - -```@meta -# TODO: Provide a link for how to access the linear and quadratic parts of the -# model. -``` - -Note that for one-sided nonlinear constraints, JuMP subtracts any values on the -right-hand side when computing expressions. In other words, one-sided nonlinear -constraints are always transformed to have a right-hand side of zero. - -This method of querying derivatives directly from a JuMP model is convenient for -interacting with the model in a structured way, for example, for accessing derivatives -of specific variables. For example, in statistical maximum likelihood estimation -problems, one is often interested in the Hessian matrix at the optimal solution, -which can be queried using the [`NLPEvaluator`](@ref). - -## Raw expression input - -!!! warning - This section requires advanced knowledge of Julia's `Expr`. You should read - the [Expressions and evaluation](https://docs.julialang.org/en/v1/manual/metaprogramming/#Expressions-and-evaluation) - section of the Julia documentation first. - -In addition to the [`@NLexpression`](@ref), [`@NLobjective`](@ref) and -[`@NLconstraint`](@ref) macros, it is also possible to provide Julia `Expr` -objects directly by using [`add_nonlinear_expression`](@ref), -[`set_nonlinear_objective`](@ref) and [`add_nonlinear_constraint`](@ref). - -This input form may be useful if the expressions are generated programmatically, -or if you experience compilation issues with the macro input (see -[Known performance issues](@ref) for more information). - -### Add a nonlinear expression - -Use [`add_nonlinear_expression`](@ref) to add a nonlinear expression to the model. - -```jldoctest -julia> model = Model(); - -julia> @variable(model, x) -x - -julia> expr = :($(x) + sin($(x)^2)) -:(x + sin(x ^ 2)) - -julia> expr_ref = add_nonlinear_expression(model, expr) -subexpression[1]: x + sin(x ^ 2.0) -``` -This is equivalent to -```jldoctest -julia> model = Model(); - -julia> @variable(model, x); - -julia> expr_ref = @NLexpression(model, x + sin(x^2)) -subexpression[1]: x + sin(x ^ 2.0) -``` - -!!! note - You must interpolate the variables directly into the expression `expr`. - -### Set the objective function - -Use [`set_nonlinear_objective`](@ref) to set a nonlinear objective. - -```jldoctest -julia> model = Model(); - -julia> @variable(model, x); - -julia> expr = :($(x) + $(x)^2) -:(x + x ^ 2) - -julia> set_nonlinear_objective(model, MIN_SENSE, expr) -``` -This is equivalent to -```jldoctest -julia> model = Model(); - -julia> @variable(model, x); - -julia> @NLobjective(model, Min, x + x^2) -``` - -!!! note - You must use `MIN_SENSE` or `MAX_SENSE` instead of `Min` and `Max`. - -### Add a constraint - -Use [`add_nonlinear_constraint`](@ref) to add a nonlinear constraint. 
- -```jldoctest -julia> model = Model(); - -julia> @variable(model, x); - -julia> expr = :($(x) + $(x)^2) -:(x + x ^ 2) - -julia> add_nonlinear_constraint(model, :($(expr) <= 1)) -(x + x ^ 2.0) - 1.0 ≤ 0 -``` - -This is equivalent to -```jldoctest -julia> model = Model(); - -julia> @variable(model, x); - -julia> @NLconstraint(model, Min, x + x^2 <= 1) -(x + x ^ 2.0) - 1.0 ≤ 0 -``` - -### More complicated examples - -Raw expression input is most useful when the expressions are generated -programmatically, often in conjunction with user-defined functions. - -As an example, we construct a model with the nonlinear constraints `f(x) <= 1`, -where `f(x) = x^2` and `f(x) = sin(x)^2`: -```jldoctest -julia> function main(functions::Vector{Function}) - model = Model() - @variable(model, x) - for (i, f) in enumerate(functions) - f_sym = Symbol("f_$(i)") - register(model, f_sym, 1, f; autodiff = true) - add_nonlinear_constraint(model, :($(f_sym)($(x)) <= 1)) - end - print(model) - return - end -main (generic function with 1 method) - -julia> main([x -> x^2, x -> sin(x)^2]) -Feasibility -Subject to - f_1(x) - 1.0 ≤ 0 - f_2(x) - 1.0 ≤ 0 -``` - -As another example, we construct a model with the constraint -`x^2 + sin(x)^2 <= 1`: -```jldoctest -julia> function main(functions::Vector{Function}) - model = Model() - @variable(model, x) - expr = Expr(:call, :+) - for (i, f) in enumerate(functions) - f_sym = Symbol("f_$(i)") - register(model, f_sym, 1, f; autodiff = true) - push!(expr.args, :($(f_sym)($(x)))) - end - add_nonlinear_constraint(model, :($(expr) <= 1)) - print(model) - return - end -main (generic function with 1 method) - -julia> main([x -> x^2, x -> sin(x)^2]) -Feasibility -Subject to - (f_1(x) + f_2(x)) - 1.0 ≤ 0 -``` - -### Registered functions with a variable number of arguments - -User defined functions require a fixed number of input arguments. However, -sometimes you will want to use a registered function like: -```jldoctest nlp_register_variable_arguments -julia> f(x...) = sum(exp(x[i]^2) for i in 1:length(x)); -``` -with different numbers of arguments. - -The solution is to register the same function `f` for each unique number of -input arguments, making sure to use a unique name each time. For example: - -```jldoctest nlp_register_variable_arguments -julia> A = [[1], [1, 2], [2, 3, 4], [1, 3, 4, 5]]; - -julia> model = Model(); - -julia> @variable(model, x[1:5]); - -julia> funcs = Set{Symbol}(); - -julia> for a in A - key = Symbol("f$(length(a))") - if !(key in funcs) - push!(funcs, key) - register(model, key, length(a), f; autodiff = true) - end - add_nonlinear_constraint(model, :($key($(x[a]...)) <= 1)) - end - -julia> print(model) -Feasibility -Subject to - f1(x[1]) - 1.0 ≤ 0 - f2(x[1], x[2]) - 1.0 ≤ 0 - f3(x[2], x[3], x[4]) - 1.0 ≤ 0 - f4(x[1], x[3], x[4], x[5]) - 1.0 ≤ 0 -``` - -## Known performance issues - -The macro-based input to JuMP's nonlinear interface can cause a performance -issue if you: - - 1. write a macro with a large number (hundreds) of terms - 2. call that macro from within a function instead of from the top-level in - global scope. - -The first issue does not depend on the number of resulting terms in the -mathematical expression, but rather the number of terms in the Julia `Expr` -representation of that expression. For example, the expression -`sum(x[i] for i in 1:1_000_000)` contains one million mathematical terms, but -the `Expr` representation is just a single sum. 
-
-The most common cause, other than a lot of tedious typing, is if you write a
-program that automatically writes a JuMP model as a text file, which you later
-execute. One example is [MINLPlib.jl](https://github.com/lanl-ansi/MINLPLib.jl)
-which automatically transpiled models in the GAMS scalar format into JuMP
-examples.
-
-As a rule of thumb, if you are writing programs to automatically generate
-expressions for the JuMP macros, you should target the [Raw expression input](@ref)
-instead. For more information, read [MathOptInterface Issue#1997](https://github.com/jump-dev/MathOptInterface.jl/issues/1997).
diff --git a/docs/src/manual/nlp_expr.md b/docs/src/manual/nlp_expr.md
new file mode 100644
index 00000000000..d2fa7c754ad
--- /dev/null
+++ b/docs/src/manual/nlp_expr.md
@@ -0,0 +1,351 @@
+```@meta
+CurrentModule = JuMP
+DocTestSetup = quote
+    using JuMP
+end
+DocTestFilters = [r"≤|<=", r"≥|>=", r" == | = ", r" ∈ | in ", r"MathOptInterface|MOI"]
+```
+
+# Nonlinear Modeling
+
+JuMP has support for general smooth nonlinear (convex and nonconvex)
+optimization problems. JuMP is able to provide exact, sparse second-order
+derivatives to solvers. This information can improve solver accuracy and
+performance.
+
+## Set a nonlinear objective
+
+Use [`@objective`](@ref) to set a nonlinear objective.
+
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x[1:2]);
+
+julia> @objective(model, Min, exp(x[1]) - sqrt(x[2]))
+(exp(x[1]) - sqrt(x[2]))
+```
+
+To modify a nonlinear objective, call [`@objective`](@ref) again.
+
+## Add a nonlinear constraint
+
+Use [`@constraint`](@ref) to add a nonlinear constraint.
+
+```jldoctest nonlinear_constraint
+julia> model = Model();
+
+julia> @variable(model, x[1:2]);
+
+julia> @constraint(model, exp(x[1]) <= 1)
+(exp(x[1]) - 1.0) ≤ 0
+
+julia> @constraint(model, con[i = 1:2], 2^x[i] >= i)
+2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarNonlinearFunction, MathOptInterface.GreaterThan{Float64}}, ScalarShape}}:
+ con[1] : ((2.0 ^ x[1]) - 1.0) ≥ 0
+ con[2] : ((2.0 ^ x[2]) - 2.0) ≥ 0
+```
+
+Delete a nonlinear constraint using [`delete`](@ref):
+```jldoctest nonlinear_constraint
+julia> delete(model, con[1])
+```
+
+## Create a nonlinear expression
+
+Use [`@expression`](@ref) to create nonlinear expression objects. The syntax
+is identical to the linear and quadratic case, except that the expression can
+contain nonlinear terms.
+
+```jldoctest nl_expression
+julia> model = Model();
+
+julia> @variable(model, x[1:2]);
+
+julia> expr = @expression(model, exp(x[1]) + sqrt(x[2]))
+(exp(x[1]) + sqrt(x[2]))
+
+julia> my_anon_expr = @expression(model, [i = 1:2], sin(x[i]))
+2-element Vector{NonlinearExpr{VariableRef}}:
+ sin(x[1])
+ sin(x[2])
+
+julia> @expression(model, my_expr[i = 1:2], sin(x[i]))
+2-element Vector{NonlinearExpr{VariableRef}}:
+ sin(x[1])
+ sin(x[2])
+```
+
+A [`NonlinearExpr`](@ref) can be used in [`@objective`](@ref),
+[`@constraint`](@ref), and even nested in other [`@expression`](@ref)s.
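+
+Because a [`NonlinearExpr`](@ref) is an ordinary Julia value, you can also
+assemble expressions programmatically before passing them to a macro. A
+minimal standalone sketch (the `terms` vector is purely illustrative, not
+part of the documented API):
+
+```julia
+using JuMP
+model = Model()
+@variable(model, x[1:3])
+# Build sin(x[1]) + sin(x[2]) + sin(x[3]) without writing each term by hand.
+terms = [sin(x[i]) for i in 1:3]  # each element is a NonlinearExpr
+@objective(model, Min, sum(terms)^2)
+```
+
+Returning to the `nl_expression` doctest: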
+ +```jldoctest nl_expression +julia> @objective(model, Min, expr^2 + 1) +(((exp(x[1]) + sqrt(x[2])) ^ 2.0) + 1.0) + +julia> @constraint(model, [i = 1:2], my_expr[i] <= i) +2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarNonlinearFunction, MathOptInterface.LessThan{Float64}}, ScalarShape}}: + (sin(x[1]) - 1.0) ≤ 0 + (sin(x[2]) - 2.0) ≤ 0 + +julia> @expression(model, nested[i = 1:2], sin(my_expr[i])) +2-element Vector{NonlinearExpr{VariableRef}}: + sin(sin(x[1])) + sin(sin(x[2])) +``` + +Use [`value`](@ref) to query the value of a nonlinear expression: + +```jldoctest nl_expression +julia> set_start_value(x[1], 1.0) + +julia> value(start_value, nested[1]) +0.7456241416655579 + +julia> sin(sin(1.0)) +0.7456241416655579 +``` + +## User-defined functions + +In addition to a standard list of univariate and multivariate functions +recognized by the `MOI.Nonlinear` submodule, JuMP supports *user-defined* +Julia functions. + +!!! warning + User-defined functions must return a scalar output. For a work-around, see + [User-defined functions with vector outputs](@ref). + +### Register a function + +Register a user-defined function using the [`@register`](@ref) macro: + +```@repl +using JuMP +square(x) = x^2 +f(x, y) = (x - 1)^2 + (y - 2)^2 +model = Model(); +@register(model, my_square, 1, square) +@register(model, my_f, 2, f) +@variable(model, x[1:2]); +@objective(model, Min, my_f(x[1], my_square(x[2]))) +``` + +The arguments to [`@register`](@ref) are: + + 1. The model in which the function is registered. + 2. A Julia symbol object which serves as the name of the user-defined function + in JuMP expressions. This name must not be the same as that of the function. + 3. The number of scalar input arguments that the function takes. + 4. A Julia method which computes the function. + +!!! warning + User-defined functions cannot be re-registered and will not update if you + modify the underlying Julia function. If you want to change a user-defined + function between solves, rebuild the model or use a different name. + +### Registered functions without macros + +The [`@register`](@ref) macro is syntactic sugar for the +[`add_user_defined_function`](@ref) method. Thus, the non-macro version of the +preceding example is: + +```@repl +using JuMP +square(x) = x^2 +f(x, y) = (x - 1)^2 + (y - 2)^2 +model = Model(); +my_square = add_user_defined_function(model, :my_square, 1, square) +my_f = add_user_defined_function(model, :my_f, 2, f) +@variable(model, x[1:2]); +@objective(model, Min, my_f(x[1], my_square(x[2]))) +``` + +This has two important consequences. + +First, you cannot register a user-defined function with the same name as an +existing function. For example, a call to [`@register`](@ref) like: +```julia +julia> @register(model, square, 1, square) +``` +will error because it is equivalent to: +```julia +julia> square = add_user_defined_function(model, :square, 1, square) +ERROR: invalid redefinition of constant square +Stacktrace: +[...] +``` +and `square` already exists as a Julia function. + +Second, you can construct and use [`UserDefinedFunction`](@ref)s outside the +macros. 
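+
+Here, [`UserDefinedFunction`](@ref) acts as a callable wrapper around the
+operator's name: calling it with JuMP variables or expressions builds a
+[`NonlinearExpr`](@ref) whose head is that name. A minimal sketch (assuming
+the `my_square` operator registered above):
+
+```julia
+op = UserDefinedFunction(:my_square)  # wrap the registered operator name
+expr = op(x)                          # builds the expression my_square(x)
+```
+
+The REPL session below walks through this in full.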
+ +```@repl +using JuMP +square(x) = x^2 +model = Model(); +@register(model, my_square, 1, square) +@variable(model, x) +typeof(my_square) +x_squared = my_square(x) +typeof(x_squared) +my_square_2 = UserDefinedFunction(:my_square) +my_square_2(x_squared) +``` + +### Register gradients and Hessians + +By default, JuMP will use automatic differentiation to compute the gradient and +Hessian of user-defined functions. If your function is not amenable to +automatic differentiation, or you can compute analytic derivatives, you may pass +additional arguments to [`@register`](@ref) to compute the first- and +second-derivatives. + +#### Univariate functions + +For univariate functions, a gradient function `∇f` returns a number that +represents the first-order derivative. You may, in addition, pass a third +function which returns a number representing the second-order derivative: +```@repl +using JuMP +f(x) = x^2 +∇f(x) = 2x +∇²f(x) = 2 +model = Model(); +@register(model, my_square, 1, f, ∇f, ∇²f) # Providing ∇²f is optional +@variable(model, x) +@objective(model, Min, my_square(x)) +``` + +#### Multivariate functions + +For multivariate functions, the gradient function `∇f` must take an +`AbstractVector` as the first argument that is filled in-place. The Hessian +function, `∇²f`, must take an `AbstractMatrix` as the first argument, the +lower-triangular of which is filled in-place: +```@repl +using JuMP +f(x...) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2 +function ∇f(g::AbstractVector{T}, x::T...) where {T} + g[1] = 400 * x[1]^3 - 400 * x[1] * x[2] + 2 * x[1] - 2 + g[2] = 200 * (x[2] - x[1]^2) + return +end +function ∇²f(H::AbstractMatrix{T}, x::T...) where {T} + H[1, 1] = 1200 * x[1]^2 - 400 * x[2] + 2 + # H[1, 2] = -400 * x[1] <-- Not needed. Fill the lower-triangular only. + H[2, 1] = -400 * x[1] + H[2, 2] = 200.0 + return +end +model = Model(); +@register(model, rosenbrock, 2, f, ∇f, ∇²f) # Providing ∇²f is optional +@variable(model, x[1:2]) +@objective(model, Min, rosenbrock(x[1], x[2])) +``` + +You may assume the Hessian matrix `H` is initialized with zeros, and because `H` +is symmetric, you need only to fill in the non-zero of the lower-triangular +terms. The matrix type passed in as `H` depends on the automatic differentiation +system, so make sure the first argument to the Hessian function supports an +`AbstractMatrix` (it may be something other than `Matrix{Float64}`). Moreover, +you may assume only that `H` supports `size(H)` and `setindex!`. Finally, the +matrix is treated as dense, so the performance will be poor on functions with +high-dimensional input. + +### User-defined functions with vector inputs + +User-defined functions which take vectors as input arguments (for example, +`f(x::Vector)`) are *not* supported. Instead, use Julia's splatting syntax to +create a function with scalar arguments. For example, instead of: +```julia +f(x::Vector) = sum(x[i]^i for i in 1:length(x)) +``` +define: +```julia +f(x...) = sum(x[i]^i for i in 1:length(x)) +``` + +Another approach is to define the splatted function as an anonymous function: +```@repl +using JuMP +model = Model(); +@variable(model, x[1:5]) +f(x::Vector) = sum(x[i]^i for i in 1:length(x)) +@register(model, my_f, 5, (x...) -> f(collect(x))) +@objective(model, Min, my_f(x...)) +``` + +### Automatic differentiation + +JuMP does not support black-box optimization, so all user-defined functions must +provide derivatives in some form. Fortunately, JuMP supports automatic +differentiation of user-defined functions. + +!!! 
info + Automatic differentiation is *not* finite differencing. JuMP's automatically + computed derivatives are not subject to approximation error. + +JuMP uses [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl) to +perform automatic differentiation; see the ForwardDiff.jl +[documentation](https://www.juliadiff.org/ForwardDiff.jl/v0.10.2/user/limitations.html) +for a description of how to write a function suitable for automatic +differentiation. + +#### Common mistakes when writing a user-defined function + +!!! warning + Get an error like `No method matching Float64(::ForwardDiff.Dual)`? Read + this section, and see the guidelines at [ForwardDiff.jl](https://www.juliadiff.org/ForwardDiff.jl/release-0.10/user/limitations.html). + +The most common error is that your user-defined function is not generic with +respect to the number type, that is, don't assume that the input to the function +is `Float64`. +```julia +f(x::Float64) = 2 * x # This will not work. +f(x::Real) = 2 * x # This is good. +f(x) = 2 * x # This is also good. +``` + +Another reason you may encounter this error is if you create arrays inside +your function which are `Float64`. +```julia +function bad_f(x...) + y = zeros(length(x)) # This constructs an array of `Float64`! + for i = 1:length(x) + y[i] = x[i]^i + end + return sum(y) +end + +function good_f(x::T...) where {T<:Real} + y = zeros(T, length(x)) # Construct an array of type `T` instead! + for i = 1:length(x) + y[i] = x[i]^i + end + return sum(y) +end +``` + +## Factors affecting solution time + +The execution time when solving a nonlinear programming problem can be divided +into two parts, the time spent in the optimization algorithm (the solver) and +the time spent evaluating the nonlinear functions and corresponding derivatives. +Ipopt explicitly displays these two timings in its output, for example: + +``` +Total CPU secs in IPOPT (w/o function evaluations) = 7.412 +Total CPU secs in NLP function evaluations = 2.083 +``` + +For Ipopt in particular, one can improve the performance by installing advanced +sparse linear algebra packages, see [Installation Guide](@ref). For other +solvers, see their respective documentation for performance tips. + +The function evaluation time, on the other hand, is the responsibility of the +modeling language. JuMP computes derivatives by using reverse-mode automatic +differentiation with graph coloring methods for exploiting sparsity of the +Hessian matrix. As a conservative bound, JuMP's performance here currently +may be expected to be within a factor of 5 of AMPL's. Our [paper in +SIAM Review](https://mlubin.github.io/pdf/jump-sirev.pdf) has more details. diff --git a/docs/src/manual/objective.md b/docs/src/manual/objective.md index 140f7157ae1..8c4e5d3e2be 100644 --- a/docs/src/manual/objective.md +++ b/docs/src/manual/objective.md @@ -64,6 +64,19 @@ julia> @objective(model, Max, x * y + x + y) x*y + x + y ``` +## Set a nonlinear objective + +Use the [`@objective`](@ref) macro to set a nonlinear objective function: + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x <= 1); + +julia> @objective(model, Max, log(x)) +log(x) +``` + ## Query the objective function Use [`objective_function`](@ref) to return the current objective function. 
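+
+For a nonlinear objective, [`objective_function`](@ref) returns the
+[`NonlinearExpr`](@ref) itself. A minimal sketch (the printed form and the
+concrete expression type shown in the comments may vary between versions):
+
+```julia
+using JuMP
+model = Model()
+@variable(model, x <= 1)
+@objective(model, Max, log(x))
+objective_function(model)       # log(x), a NonlinearExpr
+objective_function_type(model)  # the corresponding expression type
+```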
diff --git a/docs/src/should_i_use.md b/docs/src/should_i_use.md index 67081ac77e1..aeb68d99c0e 100644 --- a/docs/src/should_i_use.md +++ b/docs/src/should_i_use.md @@ -79,7 +79,7 @@ consider using other packages such as: JuMP does support nonlinear programs with constraints and objectives containing user-defined functions. However, the functions must be automatically differentiable, or need to provide explicit derivatives. (See -[User-defined Functions](@ref) for more information.) +[User-defined functions](@ref) for more information.) If your function is a black-box that is non-differentiable (for example, it is the output of a simulation written in C++), JuMP is not the right tool for the diff --git a/docs/src/tutorials/applications/power_systems.jl b/docs/src/tutorials/applications/power_systems.jl index 983075c8c0b..86073fb59b1 100644 --- a/docs/src/tutorials/applications/power_systems.jl +++ b/docs/src/tutorials/applications/power_systems.jl @@ -513,17 +513,17 @@ function solve_nonlinear_economic_dispatch( if silent set_silent(model) end - register(model, :tcf, 1, thermal_cost_function; autodiff = true) + @register(model, tcf, 1, thermal_cost_function) N = length(generators) @variable(model, generators[i].min <= g[i = 1:N] <= generators[i].max) @variable(model, 0 <= w <= scenario.wind) - @NLobjective( + @objective( model, Min, sum(generators[i].variable_cost * tcf(g[i]) for i in 1:N) + wind.variable_cost * w, ) - @NLconstraint(model, sum(g[i] for i in 1:N) + sqrt(w) == scenario.demand) + @constraint(model, sum(g[i] for i in 1:N) + sqrt(w) == scenario.demand) optimize!(model) return ( g = value.(g), diff --git a/docs/src/tutorials/nonlinear/nested_problems.jl b/docs/src/tutorials/nonlinear/nested_problems.jl index bf70493196a..c24b0d546b0 100644 --- a/docs/src/tutorials/nonlinear/nested_problems.jl +++ b/docs/src/tutorials/nonlinear/nested_problems.jl @@ -74,10 +74,10 @@ function solve_lower_level(x...) model = Model(Ipopt.Optimizer) set_silent(model) @variable(model, y[1:2]) - @NLobjective( + @objective( model, Max, - x[1]^2 * y[1] + x[2]^2 * y[2] - x[1] * y[1]^4 - 2 * x[2] * y[2]^4, + x[1]^2 * y[1] + x[2]^2 * y[2] - x[1] * y[1]^4.0 - 2 * x[2] * y[2]^4.0, ) @constraint(model, (y[1] - 10)^2 + (y[2] - 10)^2 <= 25) optimize!(model) @@ -141,8 +141,8 @@ end model = Model(Ipopt.Optimizer) @variable(model, x[1:2] >= 0) -register(model, :V, 2, V, ∇V, ∇²V) -@NLobjective(model, Min, x[1]^2 + x[2]^2 + V(x[1], x[2])) +@register(model, f_V, 2, V, ∇V, ∇²V) +@objective(model, Min, x[1]^2 + x[2]^2 + f_V(x[1], x[2])) optimize!(model) solution_summary(model) @@ -213,15 +213,15 @@ end model = Model(Ipopt.Optimizer) @variable(model, x[1:2] >= 0) cache = Cache(Float64[], NaN, Float64[]) -register( +@register( model, - :V, + f_V, 2, (x...) -> cached_f(cache, x...), (g, x...) -> cached_∇f(cache, g, x...), (H, x...) 
-> cached_∇²f(cache, H, x...), ) -@NLobjective(model, Min, x[1]^2 + x[2]^2 + V(x[1], x[2])) +@objective(model, Min, x[1]^2 + x[2]^2 + f_V(x[1], x[2])) optimize!(model) solution_summary(model) diff --git a/docs/src/tutorials/nonlinear/querying_hessians.jl b/docs/src/tutorials/nonlinear/querying_hessians.jl index 807430d8195..e70f0975d9e 100644 --- a/docs/src/tutorials/nonlinear/querying_hessians.jl +++ b/docs/src/tutorials/nonlinear/querying_hessians.jl @@ -68,8 +68,8 @@ model = Model(Ipopt.Optimizer) set_silent(model) @variable(model, x[i = 1:2], start = -i) @constraint(model, g_1, x[1]^2 <= 1) -@NLconstraint(model, g_2, (x[1] + x[2])^2 <= 2) -@NLobjective(model, Min, (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2) +@constraint(model, g_2, (x[1] + x[2])^2 <= 2) +@objective(model, Min, (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2) optimize!(model) # ## The analytic solution diff --git a/docs/src/tutorials/nonlinear/rocket_control.jl b/docs/src/tutorials/nonlinear/rocket_control.jl index 4e3d35e8000..2cf3317aaf4 100644 --- a/docs/src/tutorials/nonlinear/rocket_control.jl +++ b/docs/src/tutorials/nonlinear/rocket_control.jl @@ -120,7 +120,7 @@ fix(m[n], m_f; force = true) # ## Forces -@NLexpressions( +@expressions( rocket, begin ## Drag(h,v) = Dc v^2 exp( -hc * (h - h0) / h0 ) @@ -137,17 +137,17 @@ fix(m[n], m_f; force = true) for j in 2:n ## h' = v ## Rectangular integration - ## @NLconstraint(rocket, h[j] == h[j - 1] + Δt * v[j - 1]) + ## @constraint(rocket, h[j] == h[j - 1] + Δt * v[j - 1]) ## Trapezoidal integration - @NLconstraint(rocket, h[j] == h[j-1] + 0.5 * Δt * (v[j] + v[j-1])) + @constraint(rocket, h[j] == h[j-1] + 0.5 * Δt * (v[j] + v[j-1])) ## v' = (T-D(h,v))/m - g(h) ## Rectangular integration - ## @NLconstraint( + ## @constraint( ## rocket, ## v[j] == v[j - 1] + Δt *((T[j - 1] - drag[j - 1]) / m[j - 1] - grav[j - 1]) ## ) ## Trapezoidal integration - @NLconstraint( + @constraint( rocket, v[j] == v[j-1] + @@ -160,9 +160,9 @@ for j in 2:n ) ## m' = -T/c ## Rectangular integration - ## @NLconstraint(rocket, m[j] == m[j - 1] - Δt * T[j - 1] / c) + ## @constraint(rocket, m[j] == m[j - 1] - Δt * T[j - 1] / c) ## Trapezoidal integration - @NLconstraint(rocket, m[j] == m[j-1] - 0.5 * Δt * (T[j] + T[j-1]) / c) + @constraint(rocket, m[j] == m[j-1] - 0.5 * Δt * (T[j] + T[j-1]) / c) end # Solve for the control and state diff --git a/docs/src/tutorials/nonlinear/simple_examples.jl b/docs/src/tutorials/nonlinear/simple_examples.jl index b358b30a72e..3a8ae666cba 100644 --- a/docs/src/tutorials/nonlinear/simple_examples.jl +++ b/docs/src/tutorials/nonlinear/simple_examples.jl @@ -23,7 +23,7 @@ function example_rosenbrock() set_silent(model) @variable(model, x) @variable(model, y) - @NLobjective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2) + @objective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2) optimize!(model) Test.@test termination_status(model) == LOCALLY_SOLVED Test.@test primal_status(model) == FEASIBLE_POINT @@ -63,7 +63,7 @@ function example_clnlbeam() -0.05 <= x[1:(N+1)] <= 0.05 u[1:(N+1)] end) - @NLobjective( + @objective( model, Min, sum( @@ -71,7 +71,7 @@ function example_clnlbeam() 0.5 * alpha * h * (cos(t[i+1]) + cos(t[i])) for i in 1:N ), ) - @NLconstraint( + @constraint( model, [i = 1:N], x[i+1] - x[i] - 0.5 * h * (sin(t[i+1]) + sin(t[i])) == 0, @@ -109,7 +109,7 @@ function example_mle() set_silent(model) @variable(model, μ, start = 0.0) @variable(model, σ >= 0.0, start = 1.0) - @NLobjective( + @objective( model, Max, n / 2 * log(1 / (2 * π * σ^2)) - @@ -124,7 +124,7 @@ function example_mle() 
Test.@test value(μ) ≈ Statistics.mean(data) atol = 1e-3 Test.@test value(σ)^2 ≈ Statistics.var(data) atol = 1e-2 ## You can even do constrained MLE! - @NLconstraint(model, μ == σ^2) + @constraint(model, μ == σ^2) optimize!(model) Test.@test value(μ) ≈ value(σ)^2 println() diff --git a/docs/src/tutorials/nonlinear/space_shuttle_reentry_trajectory.jl b/docs/src/tutorials/nonlinear/space_shuttle_reentry_trajectory.jl index 64f0413e597..638c7e1d8f0 100644 --- a/docs/src/tutorials/nonlinear/space_shuttle_reentry_trajectory.jl +++ b/docs/src/tutorials/nonlinear/space_shuttle_reentry_trajectory.jl @@ -237,38 +237,34 @@ initial_guess = mapreduce(transpose, vcat, interp_linear.(1:n)) set_start_value.(all_variables(model), vec(initial_guess)) ## Functions to restore `h` and `v` to their true scale -@NLexpression(model, h[j = 1:n], scaled_h[j] * 1e5) -@NLexpression(model, v[j = 1:n], scaled_v[j] * 1e4) +@expression(model, h[j = 1:n], scaled_h[j] * 1e5) +@expression(model, v[j = 1:n], scaled_v[j] * 1e4) ## Helper functions -@NLexpression(model, c_L[j = 1:n], a₀ + a₁ * rad2deg(α[j])) -@NLexpression( - model, - c_D[j = 1:n], - b₀ + b₁ * rad2deg(α[j]) + b₂ * rad2deg(α[j])^2 -) -@NLexpression(model, ρ[j = 1:n], ρ₀ * exp(-h[j] / hᵣ)) -@NLexpression(model, D[j = 1:n], 0.5 * c_D[j] * S * ρ[j] * v[j]^2) -@NLexpression(model, L[j = 1:n], 0.5 * c_L[j] * S * ρ[j] * v[j]^2) -@NLexpression(model, r[j = 1:n], Rₑ + h[j]) -@NLexpression(model, g[j = 1:n], μ / r[j]^2) +@expression(model, c_L[j = 1:n], a₀ + a₁ * rad2deg(α[j])) +@expression(model, c_D[j = 1:n], b₀ + b₁ * rad2deg(α[j]) + b₂ * rad2deg(α[j])^2) +@expression(model, ρ[j = 1:n], ρ₀ * exp(-h[j] / hᵣ)) +@expression(model, D[j = 1:n], 0.5 * c_D[j] * S * ρ[j] * v[j]^2) +@expression(model, L[j = 1:n], 0.5 * c_L[j] * S * ρ[j] * v[j]^2) +@expression(model, r[j = 1:n], Rₑ + h[j]) +@expression(model, g[j = 1:n], μ / r[j]^2) ## Motion of the vehicle as a differential-algebraic system of equations (DAEs) -@NLexpression(model, δh[j = 1:n], v[j] * sin(γ[j])) -@NLexpression( +@expression(model, δh[j = 1:n], v[j] * sin(γ[j])) +@expression( model, δϕ[j = 1:n], (v[j] / r[j]) * cos(γ[j]) * sin(ψ[j]) / cos(θ[j]) ) -@NLexpression(model, δθ[j = 1:n], (v[j] / r[j]) * cos(γ[j]) * cos(ψ[j])) -@NLexpression(model, δv[j = 1:n], -(D[j] / m) - g[j] * sin(γ[j])) -@NLexpression( +@expression(model, δθ[j = 1:n], (v[j] / r[j]) * cos(γ[j]) * cos(ψ[j])) +@expression(model, δv[j = 1:n], -(D[j] / m) - g[j] * sin(γ[j])) +@expression( model, δγ[j = 1:n], (L[j] / (m * v[j])) * cos(β[j]) + cos(γ[j]) * ((v[j] / r[j]) - (g[j] / v[j])) ) -@NLexpression( +@expression( model, δψ[j = 1:n], (1 / (m * v[j] * cos(γ[j]))) * L[j] * sin(β[j]) + @@ -281,20 +277,20 @@ for j in 2:n if integration_rule == "rectangular" ## Rectangular integration - @NLconstraint(model, h[j] == h[i] + Δt[i] * δh[i]) - @NLconstraint(model, ϕ[j] == ϕ[i] + Δt[i] * δϕ[i]) - @NLconstraint(model, θ[j] == θ[i] + Δt[i] * δθ[i]) - @NLconstraint(model, v[j] == v[i] + Δt[i] * δv[i]) - @NLconstraint(model, γ[j] == γ[i] + Δt[i] * δγ[i]) - @NLconstraint(model, ψ[j] == ψ[i] + Δt[i] * δψ[i]) + @constraint(model, h[j] == h[i] + Δt[i] * δh[i]) + @constraint(model, ϕ[j] == ϕ[i] + Δt[i] * δϕ[i]) + @constraint(model, θ[j] == θ[i] + Δt[i] * δθ[i]) + @constraint(model, v[j] == v[i] + Δt[i] * δv[i]) + @constraint(model, γ[j] == γ[i] + Δt[i] * δγ[i]) + @constraint(model, ψ[j] == ψ[i] + Δt[i] * δψ[i]) elseif integration_rule == "trapezoidal" ## Trapezoidal integration - @NLconstraint(model, h[j] == h[i] + 0.5 * Δt[i] * (δh[j] + δh[i])) - @NLconstraint(model, 
ϕ[j] == ϕ[i] + 0.5 * Δt[i] * (δϕ[j] + δϕ[i])) - @NLconstraint(model, θ[j] == θ[i] + 0.5 * Δt[i] * (δθ[j] + δθ[i])) - @NLconstraint(model, v[j] == v[i] + 0.5 * Δt[i] * (δv[j] + δv[i])) - @NLconstraint(model, γ[j] == γ[i] + 0.5 * Δt[i] * (δγ[j] + δγ[i])) - @NLconstraint(model, ψ[j] == ψ[i] + 0.5 * Δt[i] * (δψ[j] + δψ[i])) + @constraint(model, h[j] == h[i] + 0.5 * Δt[i] * (δh[j] + δh[i])) + @constraint(model, ϕ[j] == ϕ[i] + 0.5 * Δt[i] * (δϕ[j] + δϕ[i])) + @constraint(model, θ[j] == θ[i] + 0.5 * Δt[i] * (δθ[j] + δθ[i])) + @constraint(model, v[j] == v[i] + 0.5 * Δt[i] * (δv[j] + δv[i])) + @constraint(model, γ[j] == γ[i] + 0.5 * Δt[i] * (δγ[j] + δγ[i])) + @constraint(model, ψ[j] == ψ[i] + 0.5 * Δt[i] * (δψ[j] + δψ[i])) else @error "Unexpected integration rule '$(integration_rule)'" end diff --git a/docs/src/tutorials/nonlinear/tips_and_tricks.jl b/docs/src/tutorials/nonlinear/tips_and_tricks.jl index f932dcdad8b..d3e6812a283 100644 --- a/docs/src/tutorials/nonlinear/tips_and_tricks.jl +++ b/docs/src/tutorials/nonlinear/tips_and_tricks.jl @@ -46,10 +46,10 @@ foo_2(x, y) = foo(x, y)[2] model = Model(Ipopt.Optimizer) set_silent(model) @variable(model, x[1:2] >= 0, start = 0.1) -register(model, :foo_1, 2, foo_1; autodiff = true) -register(model, :foo_2, 2, foo_2; autodiff = true) -@NLobjective(model, Max, foo_1(x[1], x[2])) -@NLconstraint(model, foo_2(x[1], x[2]) <= 2) +@register(model, f_foo_1, 2, foo_1) +@register(model, f_foo_2, 2, foo_2) +@objective(model, Max, f_foo_1(x[1], x[2])) +@constraint(model, f_foo_2(x[1], x[2]) <= 2) function_calls = 0 optimize!(model) Test.@test objective_value(model) ≈ √3 atol = 1e-4 @@ -114,10 +114,10 @@ println("function_calls = ", function_calls) model = Model(Ipopt.Optimizer) set_silent(model) @variable(model, x[1:2] >= 0, start = 0.1) -register(model, :foo_1, 2, memoized_foo[1]; autodiff = true) -register(model, :foo_2, 2, memoized_foo[2]; autodiff = true) -@NLobjective(model, Max, foo_1(x[1], x[2])) -@NLconstraint(model, foo_2(x[1], x[2]) <= 2) +@register(model, f_foo_1, 2, memoized_foo[1]) +@register(model, f_foo_2, 2, memoized_foo[2]) +@objective(model, Max, f_foo_1(x[1], x[2])) +@constraint(model, f_foo_2(x[1], x[2]) <= 2) function_calls = 0 optimize!(model) Test.@test objective_value(model) ≈ √3 atol = 1e-4 diff --git a/docs/src/tutorials/nonlinear/user_defined_hessians.jl b/docs/src/tutorials/nonlinear/user_defined_hessians.jl index 1672aae56a3..12f64254a4f 100644 --- a/docs/src/tutorials/nonlinear/user_defined_hessians.jl +++ b/docs/src/tutorials/nonlinear/user_defined_hessians.jl @@ -21,7 +21,7 @@ # # User-defined Hessians # In this tutorial, we explain how to write a user-defined function (see -# [User-defined Functions](@ref)) with a Hessian matrix explicitly provided by +# [User-defined functions](@ref)) with a Hessian matrix explicitly provided by # the user. # # For a more advanced example, see [Nested optimization problems](@ref). @@ -65,11 +65,11 @@ end # you may assume only that `H` supports `size(H)` and `setindex!`. 
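+#
+# For example, a Hessian callback for a function of two variables typically
+# fills in only the lower-triangular entries in-place. (A hedged sketch: `g`
+# and the particular entries shown here are hypothetical.)
+#
+# ```julia
+# function ∇²g(H::AbstractMatrix, x...)
+#     H[1, 1] = 2.0   # ∂²g/∂x₁²
+#     H[2, 1] = -1.0  # mixed partial; the upper triangle is left unset
+#     H[2, 2] = 2.0   # ∂²g/∂x₂²
+#     return
+# end
+# ```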
# Now that we have the function, its gradient, and its Hessian, we can construct
-# a JuMP model, register the function, and use it in a `@NL` macro:
+# a JuMP model, register the function, and use it in a macro:

 model = Model(Ipopt.Optimizer)
 @variable(model, x[1:2])
-register(model, :rosenbrock, 2, rosenbrock, ∇rosenbrock, ∇²rosenbrock)
-@NLobjective(model, Min, rosenbrock(x[1], x[2]))
+@register(model, f_rosenbrock, 2, rosenbrock, ∇rosenbrock, ∇²rosenbrock)
+@objective(model, Min, f_rosenbrock(x[1], x[2]))
 optimize!(model)
 solution_summary(model; verbose = true)
diff --git a/src/JuMP.jl b/src/JuMP.jl
index ee41cb1a075..5efab520cc4 100644
--- a/src/JuMP.jl
+++ b/src/JuMP.jl
@@ -19,6 +19,7 @@ module JuMP

 import Base.Meta: isexpr, quot
 import LinearAlgebra
+import MacroTools
 import MathOptInterface as MOI
 import MutableArithmetics
 import OrderedCollections
diff --git a/src/aff_expr.jl b/src/aff_expr.jl
index d80f909e8ab..75634bee377 100644
--- a/src/aff_expr.jl
+++ b/src/aff_expr.jl
@@ -124,6 +124,13 @@ end

 variable_ref_type(::Type{GenericAffExpr{C,V}}) where {C,V} = V

+function owner_model(x::GenericAffExpr)
+    if !isempty(x.terms)
+        return owner_model(first(keys(x.terms)))
+    end
+    return nothing
+end
+
 """
     GenericAffExpr(constant::V, kv::AbstractArray{Pair{K,V}}) where {K,V}
diff --git a/src/complement.jl b/src/complement.jl
index f92ca45827c..74f47d37fdc 100644
--- a/src/complement.jl
+++ b/src/complement.jl
@@ -70,6 +70,6 @@ function parse_constraint_call(
     F,
     x,
 )
-    f, parse_code = _MA.rewrite(F; move_factors_into_sums = false)
+    f, parse_code = _rewrite_expression(F)
     return parse_code, :(_build_complements_constraint($errorf, $f, $(esc(x))))
 end
diff --git a/src/macros.jl b/src/macros.jl
index 0740f64c562..fc120efe225 100644
--- a/src/macros.jl
+++ b/src/macros.jl
@@ -474,6 +474,65 @@ function parse_constraint_head(
     return is_vectorized, parse_code, build_call
 end

+_ifelse(a, x, y) = ifelse(a, x, y)
+_and(x, y) = x && y
+_or(x, y) = x || y
+_less_than(x, y) = x < y
+_greater_than(x, y) = x > y
+_less_equal(x, y) = x <= y
+_greater_equal(x, y) = x >= y
+_equal_to(x, y) = x == y
+
+function _rewrite_to_jump_logic(x)
+    if Meta.isexpr(x, :call)
+        if x.args[1] == :ifelse
+            return Expr(:call, _ifelse, x.args[2:end]...)
+        elseif x.args[1] == :<
+            return Expr(:call, _less_than, x.args[2:end]...)
+        elseif x.args[1] == :>
+            return Expr(:call, _greater_than, x.args[2:end]...)
+        elseif x.args[1] == :<=
+            return Expr(:call, _less_equal, x.args[2:end]...)
+        elseif x.args[1] == :>=
+            return Expr(:call, _greater_equal, x.args[2:end]...)
+        elseif x.args[1] == :(==)
+            return Expr(:call, _equal_to, x.args[2:end]...)
+        end
+    elseif Meta.isexpr(x, :||)
+        return Expr(:call, _or, x.args...)
+    elseif Meta.isexpr(x, :&&)
+        return Expr(:call, _and, x.args...)
+    elseif Meta.isexpr(x, :comparison)
+        lhs = Expr(:call, x.args[2], x.args[1], x.args[3])
+        rhs = Expr(:call, x.args[4], x.args[3], x.args[5])
+        return Expr(
+            :call,
+            _and,
+            _rewrite_to_jump_logic(lhs),
+            _rewrite_to_jump_logic(rhs),
+        )
+    end
+    return x
+end
+
+"""
+    _rewrite_expression(expr)
+
+A helper function so that we can change how we rewrite expressions in a single
+place and have it cascade to all locations in the JuMP macros that rewrite
+expressions.
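+
+## Example
+
+An illustrative sketch (this is an internal helper; the returned name is a
+`gensym`, and the exact code block depends on `MutableArithmetics`):
+
+```julia
+variable, code = _rewrite_expression(:(ifelse(x >= 0, x, -x)))
+# Evaluating `code` in the macro's scope binds `variable` to the rewritten,
+# flattened expression.
+```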
+""" +function _rewrite_expression(expr) + new_expr = MacroTools.postwalk(_rewrite_to_jump_logic, expr) + new_aff, parse_aff = _MA.rewrite(new_expr; move_factors_into_sums = false) + ret = gensym() + code = quote + $parse_aff + $ret = $flatten($new_aff) + end + return ret, code +end + function parse_constraint_head( _error::Function, ::Val{:comparison}, @@ -501,9 +560,9 @@ function parse_constraint_head( "`$ub >= ... >= $lb`.", ) end - new_aff, parse_aff = _MA.rewrite(aff; move_factors_into_sums = false) - new_lb, parse_lb = _MA.rewrite(lb; move_factors_into_sums = false) - new_ub, parse_ub = _MA.rewrite(ub; move_factors_into_sums = false) + new_aff, parse_aff = _rewrite_expression(aff) + new_lb, parse_lb = _rewrite_expression(lb) + new_ub, parse_ub = _rewrite_expression(ub) parse_code = quote $parse_aff $parse_lb @@ -584,7 +643,7 @@ function parse_constraint_call( func, set, ) - f, parse_code = _MA.rewrite(func; move_factors_into_sums = false) + f, parse_code = _rewrite_expression(func) build_call = if vectorized :(build_constraint.($_error, _desparsify($f), Ref($(esc(set))))) else @@ -618,7 +677,7 @@ function parse_constraint_call( rhs, ) func = vectorized ? :($lhs .- $rhs) : :($lhs - $rhs) - f, parse_code = _MA.rewrite(func; move_factors_into_sums = false) + f, parse_code = _rewrite_expression(func) set = operator_to_set(_error, operator) # `_functionize` deals with the pathological case where the `lhs` is a # `VariableRef` and the `rhs` is a summation with no terms. @@ -1590,7 +1649,7 @@ macro objective(model, args...) end sense, x = args sense_expr = _moi_sense(_error, sense) - newaff, parsecode = _MA.rewrite(x; move_factors_into_sums = false) + newaff, parsecode = _rewrite_expression(x) code = quote $parsecode # Don't leak a `_MA.Zero` if the objective expression is an empty @@ -1679,8 +1738,9 @@ macro expression(args...) "different name for the index.", ) end - code = _MA.rewrite_and_return(x; move_factors_into_sums = false) + expr_var, build_code = _rewrite_expression(x) code = quote + $build_code # Don't leak a `_MA.Zero` if the expression is an empty summation, or # other structure that returns `_MA.Zero()`. _replace_zero($m, $code) diff --git a/src/mutable_arithmetics.jl b/src/mutable_arithmetics.jl index a07c16587cb..3e39665afcc 100644 --- a/src/mutable_arithmetics.jl +++ b/src/mutable_arithmetics.jl @@ -286,6 +286,7 @@ end function _MA.add_mul(lhs::AbstractJuMPScalar, x::_Scalar, y::_Scalar) T = _MA.promote_operation(_MA.add_mul, typeof(lhs), typeof(x), typeof(y)) expr = _MA.operate(convert, T, lhs) + # We can't use `operate!!` here because that will cause a StackOverflow. if _MA.mutability(T) == _MA.IsMutable() return _MA.operate!(_MA.add_mul, expr, x, y) end @@ -306,6 +307,7 @@ function _MA.add_mul( typeof.(args)..., ) expr = _MA.operate(convert, T, lhs) + # We can't use `operate!!` here because that will cause a StackOverflow. if _MA.mutability(T) == _MA.IsMutable() return _MA.operate!(_MA.add_mul, expr, x, y, args...) end diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl index 02e662673bf..cf80125963a 100644 --- a/src/nlp_expr.jl +++ b/src/nlp_expr.jl @@ -4,12 +4,15 @@ # file, You can obtain one at https://mozilla.org/MPL/2.0/. """ - NonlinearExpr(head::Symbol, args::Vector{Any}) - NonlinearExpr(head::Symbol, args::Any...) + NonlinearExpr{V}(head::Symbol, args::Vector{Any}) + NonlinearExpr{V}(head::Symbol, args::Any...) 
The scalar-valued nonlinear function `head(args...)`, represented as a symbolic expression tree, with the call operator `head` and ordered arguments in `args`. +`V` is the type of [`AbstractVariableRef`](@ref) present in the expression, and +is used to help dispatch JuMP extensions. + ## `head` The `head::Symbol` must be an operator supported by the model. @@ -32,13 +35,16 @@ querying [`MOI.ListOfSupportedNonlinearOperators`](@ref). The vector `args` contains the arguments to the nonlinear function. If the operator is univariate, it must contain one element. Otherwise, it may contain -multiple elements. Each element must be one of the following: +multiple elements. + +Given a subtype of [`AbstractVariableRef`](@ref), `V`, for `NonlinearExpr{V}`, +each element must be one of the following: - * A constant value of type `T<:Number` - * A [`VariableRef`](@ref) - * An [`AffExpr`](@ref) - * A [`QuadExpr`](@ref) - * A [`NonlinearExpr`](@ref) + * A constant value of type `<:Number` + * A `V` + * A [`GenericAffExpr{C,V}`](@ref) + * A [`GenericQuadExpr{C,V}`](@ref) + * A [`NonlinearExpr{V}`](@ref) ## Unsupported operators @@ -66,44 +72,101 @@ julia> f = NonlinearExpr(:^, NonlinearExpr(:sin, x), 2.0) ^(sin(x), 2.0) ``` """ -struct NonlinearExpr <: AbstractJuMPScalar +struct NonlinearExpr{V<:AbstractVariableRef} <: AbstractJuMPScalar head::Symbol args::Vector{Any} + + function NonlinearExpr(head::Symbol, args::Vector{Any}) + index = findfirst(Base.Fix2(isa, AbstractJuMPScalar), args) + if index === nothing + error( + "Unable to create a nonlinear expression because it did not " * + "contain any JuMP scalars. head = $head, args = $args.", + ) + end + return new{variable_ref_type(args[index])}(head, args) + end + + function NonlinearExpr{V}( + head::Symbol, + args::Vector{Any}, + ) where {V<:AbstractVariableRef} + return new{V}(head, args) + end end +variable_ref_type(::NonlinearExpr{V}) where {V} = V + # We include this method so that we can refactor the internal representation of # NonlinearExpr without having to rewrite the method overloads. -function NonlinearExpr(head::Symbol, args...) - return NonlinearExpr(head, Any[args...]) +function NonlinearExpr{V}(head::Symbol, args...) 
where {V<:AbstractVariableRef} + return NonlinearExpr{V}(head, Any[args...]) end Base.length(x::NonlinearExpr) = length(x.args) Base.getindex(x::NonlinearExpr, i::Int) = x.args[i] +const _PREFIX_OPERATORS = + (:+, :-, :*, :/, :^, :||, :&&, :>, :<, :(<=), :(>=), :(==)) + function function_string(::MIME"text/plain", x::NonlinearExpr) - io, stack, is_open = IOBuffer(), Any[x], true + io, stack = IOBuffer(), Any[x] while !isempty(stack) arg = pop!(stack) - if !is_open && arg != ')' - print(io, ", ") - end if arg isa NonlinearExpr - print(io, arg.head, "(") - push!(stack, ')') - for i in length(arg):-1:1 - push!(stack, arg[i]) + if arg.head in _PREFIX_OPERATORS && length(arg) > 1 + print(io, "(") + push!(stack, ")") + for i in length(arg):-1:2 + push!(stack, arg[i]) + push!(stack, " $(arg.head) ") + end + push!(stack, arg[1]) + else + print(io, arg.head, "(") + push!(stack, ")") + for i in length(arg):-1:2 + push!(stack, arg[i]) + push!(stack, ", ") + end + push!(stack, arg[1]) end else print(io, arg) end - is_open = arg isa NonlinearExpr end seekstart(io) return read(io, String) end -function function_string(::MIME"text/latex", expr::NonlinearExpr) - return "\\textsf{$(function_string(MIME("text/plain"), expr))}" +function function_string(::MIME"text/latex", x::NonlinearExpr) + io, stack = IOBuffer(), Any[x] + while !isempty(stack) + arg = pop!(stack) + if arg isa NonlinearExpr + if arg.head in _PREFIX_OPERATORS && length(arg) > 1 + print(io, "\\left({") + push!(stack, "}\\right)") + for i in length(arg):-1:2 + push!(stack, arg[i]) + push!(stack, "} $(arg.head) {") + end + push!(stack, arg[1]) + else + print(io, "\\textsf{", arg.head, "}\\left({") + push!(stack, "}\\right)") + for i in length(arg):-1:2 + push!(stack, arg[i]) + push!(stack, "}, {") + end + push!(stack, arg[1]) + end + else + print(io, arg) + end + end + seekstart(io) + return read(io, String) end _isequal(x, y) = x == y @@ -135,15 +198,28 @@ function MOI.Nonlinear.parse_expression( return end -function _parse_without_recursion_inner(stack, data, expr, x, parent) +function _get_node_type(data, x) id = get(data.operators.univariate_operator_to_id, x.head, nothing) - node_type = if length(x) == 1 && id !== nothing - MOI.Nonlinear.NODE_CALL_UNIVARIATE - else - id = get(data.operators.multivariate_operator_to_id, x.head, nothing) - @assert id !== nothing - MOI.Nonlinear.NODE_CALL_MULTIVARIATE + if length(x) == 1 && id !== nothing + return id, MOI.Nonlinear.NODE_CALL_UNIVARIATE end + id = get(data.operators.multivariate_operator_to_id, x.head, nothing) + if id !== nothing + return id, MOI.Nonlinear.NODE_CALL_MULTIVARIATE + end + id = get(data.operators.comparison_operator_to_id, x.head, nothing) + if id !== nothing + return id, MOI.Nonlinear.NODE_COMPARISON + end + id = get(data.operators.logic_operator_to_id, x.head, nothing) + if id !== nothing + return id, MOI.Nonlinear.NODE_LOGIC + end + return throw(MOI.UnsupportedNonlinearOperator(x.head)) +end + +function _parse_without_recursion_inner(stack, data, expr, x, parent) + id, node_type = _get_node_type(data, x) push!(expr.nodes, MOI.Nonlinear.Node(node_type, id, parent)) parent = length(expr.nodes) for i in length(x):-1:1 # Args need to be pushed onto the stack in reverse @@ -154,9 +230,9 @@ end # Method definitions -Base.zero(::Type{NonlinearExpr}) = NonlinearExpr(:+, 0.0) +Base.zero(::Type{NonlinearExpr{V}}) where {V} = NonlinearExpr{V}(:+, 0.0) -Base.one(::Type{NonlinearExpr}) = NonlinearExpr(:+, 1.0) +Base.one(::Type{NonlinearExpr{V}}) where {V} = NonlinearExpr{V}(:+, 1.0) # 
Univariate operators

@@ -165,14 +241,18 @@ for f in MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS
     if f == :+
         continue # We don't need this.
     elseif f == :-
-        @eval Base.:-(x::NonlinearExpr) = NonlinearExpr(:-, x)
+        @eval Base.:-(x::NonlinearExpr{V}) where {V} = NonlinearExpr{V}(:-, x)
     elseif isdefined(Base, f)
-        @eval Base.$(f)(x::AbstractJuMPScalar) = NonlinearExpr($op, x)
+        @eval function Base.$(f)(x::AbstractJuMPScalar)
+            return NonlinearExpr{variable_ref_type(x)}($op, x)
+        end
     elseif isdefined(MOI.Nonlinear, :SpecialFunctions)
         # The operator is defined in some other package.
         SF = MOI.Nonlinear.SpecialFunctions
         if isdefined(SF, f)
-            @eval $(SF).$(f)(x::AbstractJuMPScalar) = NonlinearExpr($op, x)
+            @eval function $(SF).$(f)(x::AbstractJuMPScalar)
+                return NonlinearExpr{variable_ref_type(x)}($op, x)
+            end
         end
     end
 end
@@ -190,22 +270,139 @@ for f in (:+, :-, :*, :^, :/, :atan)
     @eval begin
         function Base.$(f)(x::AbstractJuMPScalar, y::_Constant)
             rhs = convert(Float64, _constant_to_number(y))
-            return NonlinearExpr($op, x, rhs)
+            return NonlinearExpr{variable_ref_type(x)}($op, x, rhs)
         end
         function Base.$(f)(x::_Constant, y::AbstractJuMPScalar)
             lhs = convert(Float64, _constant_to_number(x))
-            return NonlinearExpr($op, lhs, y)
+            return NonlinearExpr{variable_ref_type(y)}($op, lhs, y)
         end
         function Base.$(f)(x::AbstractJuMPScalar, y::AbstractJuMPScalar)
-            return NonlinearExpr($op, x, y)
+            return NonlinearExpr{variable_ref_type(x)}($op, x, y)
+        end
+    end
+end
+
+function _MA.operate!!(
+    ::typeof(_MA.add_mul),
+    x::NonlinearExpr,
+    y::AbstractJuMPScalar,
+)
+    if x.head == :+
+        push!(x.args, y)
+        return x
+    end
+    return +(x, y)
+end
+
+"""
+    flatten(expr::NonlinearExpr)
+
+Flatten a nonlinear expression by lifting nested `+` and `*` nodes into a single
+n-ary operation.
+
+## Motivation
+
+Nonlinear expressions created using operator overloading can be deeply nested
+and unbalanced. For example, `prod(x for i in 1:4)` creates the unbalanced tree
+`((x² * x) * x)` instead of the preferable n-ary form `(x² * x * x)`.
+
+## Example
+
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x)
+x
+
+julia> y = prod(x for i in 1:4)
+((x² * x) * x)
+
+julia> flatten(y)
+(x² * x * x)
+
+julia> flatten(sin(y))
+sin((x² * x * x))
+```
+"""
+function flatten(expr::NonlinearExpr{V}) where {V}
+    root = NonlinearExpr{V}(expr.head, Any[])
+    nodes_to_visit = Any[(root, arg) for arg in reverse(expr.args)]
+    while !isempty(nodes_to_visit)
+        parent, arg = pop!(nodes_to_visit)
+        if !(arg isa NonlinearExpr)
+            # Not a nonlinear expression, so we can use recursion.
+            push!(parent.args, flatten(arg))
+        elseif parent.head in (:+, :*) && arg.head == parent.head
+            # A special case: the arg can be lifted to an n-ary argument of the
+            # parent.
+            for n in reverse(arg.args)
+                push!(nodes_to_visit, (parent, n))
+            end
+        else
+            # The default case for nonlinear expressions. Put the args on the
+            # stack, so that we may walk them later.
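+            # (For example, `flatten(sin(*(x, *(x, x))))` keeps the `sin`
+            # node but still flattens its argument to `sin((x * x * x))`
+            # once the stack is drained.)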
+ for n in reverse(arg.args) + push!(nodes_to_visit, (arg, n)) + end + empty!(arg.args) + push!(parent.args, arg) + end + end + return root +end + +flatten(expr) = expr + +function _ifelse(a::AbstractJuMPScalar, x, y) + return NonlinearExpr{variable_ref_type(a)}(:ifelse, Any[a, x, y]) +end + +for (f, op) in ( + :_and => :&&, + :_or => :||, + :_less_than => :(<), + :_greater_than => :(>), + :_less_equal => :(<=), + :_greater_equal => :(>=), + :_equal_to => :(==), +) + op = Meta.quot(op) + @eval begin + function $(f)(x::AbstractJuMPScalar, y) + return NonlinearExpr{variable_ref_type(x)}($op, x, y) + end + function $(f)(x, y::AbstractJuMPScalar) + return NonlinearExpr{variable_ref_type(y)}($op, x, y) + end + function $(f)(x::AbstractJuMPScalar, y::AbstractJuMPScalar) + return NonlinearExpr{variable_ref_type(x)}($op, x, y) end end end # JuMP interop -# TODO -check_belongs_to_model(::NonlinearExpr, ::Model) = true +function owner_model(expr::NonlinearExpr) + for arg in expr.args + if !(arg isa AbstractJuMPScalar) + continue + end + model = owner_model(arg) + if model !== nothing + return model + end + end + return nothing +end + +function check_belongs_to_model(expr::NonlinearExpr, model::AbstractModel) + for arg in expr.args + if arg isa AbstractJuMPScalar + check_belongs_to_model(arg, model) + end + end + return +end function moi_function(f::NonlinearExpr) ret = MOI.ScalarNonlinearFunction(f.head, Any[]) @@ -231,7 +428,7 @@ function moi_function(f::NonlinearExpr) end function jump_function(model::Model, f::MOI.ScalarNonlinearFunction) - ret = NonlinearExpr(f.head, Any[]) + ret = NonlinearExpr{VariableRef}(f.head, Any[]) stack = Tuple{NonlinearExpr,Any}[] for arg in reverse(f.args) push!(stack, (ret, arg)) @@ -239,7 +436,7 @@ function jump_function(model::Model, f::MOI.ScalarNonlinearFunction) while !isempty(stack) parent, arg = pop!(stack) if arg isa MOI.ScalarNonlinearFunction - new_ret = NonlinearExpr(arg.head, Any[]) + new_ret = NonlinearExpr{VariableRef}(arg.head, Any[]) push!(parent.args, new_ret) for child in reverse(arg.args) push!(stack, (new_ret, child)) @@ -254,10 +451,10 @@ function jump_function(model::Model, f::MOI.ScalarNonlinearFunction) end function jump_function_type(::Model, ::Type{<:MOI.ScalarNonlinearFunction}) - return NonlinearExpr + return NonlinearExpr{VariableRef} end -moi_function_type(::Type{NonlinearExpr}) = MOI.ScalarNonlinearFunction +moi_function_type(::Type{<:NonlinearExpr}) = MOI.ScalarNonlinearFunction function constraint_object(c::NonlinearConstraintRef) nlp = nonlinear_model(c.model) @@ -273,12 +470,12 @@ function jump_function(model::Model, expr::MOI.Nonlinear.Expression) for i in length(expr.nodes):-1:1 node = expr.nodes[i] parsed[i] = if node.type == MOI.Nonlinear.NODE_CALL_UNIVARIATE - NonlinearExpr( + NonlinearExpr{VariableRef}( nlp.operators.univariate_operators[node.index], parsed[rowvals[SparseArrays.nzrange(adj, i)[1]]], ) elseif node.type == MOI.Nonlinear.NODE_CALL_MULTIVARIATE - NonlinearExpr( + NonlinearExpr{VariableRef}( nlp.operators.multivariate_operators[node.index], Any[parsed[rowvals[j]] for j in SparseArrays.nzrange(adj, i)], ) @@ -299,19 +496,92 @@ function jump_function(model::Model, expr::MOI.Nonlinear.Expression) return parsed[1] end +function value(f::Function, expr::NonlinearExpr) + return _evaluate_expr(MOI.Nonlinear.OperatorRegistry(), f, expr) +end + +function value(a::NonlinearExpr; result::Int = 1) + return value(a) do x + return value(x; result = result) + end +end + +function _evaluate_expr( + 
::MOI.Nonlinear.OperatorRegistry, + f::Function, + expr::AbstractJuMPScalar, +) + return value(f, expr) +end + +function _evaluate_expr( + ::MOI.Nonlinear.OperatorRegistry, + ::Function, + expr::Real, +) + return convert(Float64, expr) +end + +function _evaluate_user_defined_function(registry, f, expr::NonlinearExpr) + model = owner_model(expr) + op, nargs = expr.head, length(expr.args) + udf = MOI.get(model, MOI.UserDefinedFunction(op, nargs)) + if udf === nothing + return error( + "Unable to evaluate nonlinear operator $op because it is not " * + "registered", + ) + end + args = [_evaluate_expr(registry, f, arg) for arg in expr.args] + return first(udf)(args...) +end + +function _evaluate_expr( + registry::MOI.Nonlinear.OperatorRegistry, + f::Function, + expr::NonlinearExpr, +) + op = expr.head + # TODO(odow): uses private function + if !MOI.Nonlinear._is_registered(registry, op, length(expr.args)) + return _evaluate_user_defined_function(registry, f, expr) + end + if length(expr.args) == 1 && haskey(registry.univariate_operator_to_id, op) + arg = _evaluate_expr(registry, f, expr.args[1]) + return MOI.Nonlinear.eval_univariate_function(registry, op, arg) + elseif haskey(registry.multivariate_operator_to_id, op) + args = [_evaluate_expr(registry, f, arg) for arg in expr.args] + return MOI.Nonlinear.eval_multivariate_function(registry, op, args) + elseif haskey(registry.logic_operator_to_id, op) + @assert length(expr.args) == 2 + x = _evaluate_expr(registry, f, expr.args[1]) + y = _evaluate_expr(registry, f, expr.args[2]) + return MOI.Nonlinear.eval_logic_function(registry, op, x, y) + else + @assert haskey(registry.comparison_operator_to_id, op) + @assert length(expr.args) == 2 + x = _evaluate_expr(registry, f, expr.args[1]) + y = _evaluate_expr(registry, f, expr.args[2]) + return MOI.Nonlinear.eval_comparison_function(registry, op, x, y) + end +end + # MutableArithmetics.jl # These converts are used in the {add,sub}mul definition for AbstractJuMPScalar. 
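+# For example (illustrative; these printed forms match the strings exercised
+# in `test/test_nlp_expr.jl`):
+#
+#     convert(NonlinearExpr{VariableRef}, x)       # returns `x` unchanged
+#     convert(NonlinearExpr{VariableRef}, 2x + 1)  # -> ((2.0 * x) + 1.0)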
-Base.convert(::Type{NonlinearExpr}, x::AbstractVariableRef) = x +Base.convert(::Type{<:NonlinearExpr}, x::AbstractVariableRef) = x -function Base.convert(::Type{NonlinearExpr}, x::GenericAffExpr) +function Base.convert( + ::Type{<:NonlinearExpr}, + x::GenericAffExpr{C,V}, +) where {C,V} args = Any[] for (variable, coef) in x.terms if isone(coef) push!(args, variable) elseif !iszero(coef) - push!(args, NonlinearExpr(:*, coef, variable)) + push!(args, NonlinearExpr{V}(:*, coef, variable)) end end if !iszero(x.constant) || isempty(args) @@ -320,23 +590,26 @@ function Base.convert(::Type{NonlinearExpr}, x::GenericAffExpr) if length(args) == 1 return args[1] end - return NonlinearExpr(:+, args) + return NonlinearExpr{V}(:+, args) end -function Base.convert(::Type{NonlinearExpr}, x::GenericQuadExpr) +function Base.convert( + ::Type{<:NonlinearExpr}, + x::GenericQuadExpr{C,V}, +) where {C,V} args = Any[] for (variable, coef) in x.aff.terms if isone(coef) push!(args, variable) elseif !iszero(coef) - push!(args, NonlinearExpr(:*, coef, variable)) + push!(args, NonlinearExpr{V}(:*, coef, variable)) end end for (pair, coef) in x.terms if isone(coef) - push!(args, NonlinearExpr(:*, pair.a, pair.b)) + push!(args, NonlinearExpr{V}(:*, pair.a, pair.b)) elseif !iszero(coef) - push!(args, NonlinearExpr(:*, coef, pair.a, pair.b)) + push!(args, NonlinearExpr{V}(:*, coef, pair.a, pair.b)) end end if !iszero(x.aff.constant) || isempty(args) @@ -345,29 +618,203 @@ function Base.convert(::Type{NonlinearExpr}, x::GenericQuadExpr) if length(args) == 1 return args[1] end - return NonlinearExpr(:+, args) + return NonlinearExpr{V}(:+, args) end function _MA.promote_operation( ::Union{typeof(+),typeof(-),typeof(*)}, - ::Type{NonlinearExpr}, + ::Type{NonlinearExpr{V}}, ::Type{<:AbstractJuMPScalar}, -) - return NonlinearExpr +) where {V<:AbstractVariableRef} + return NonlinearExpr{V} end function _MA.promote_operation( ::Union{typeof(+),typeof(-),typeof(*)}, ::Type{<:AbstractJuMPScalar}, - ::Type{NonlinearExpr}, -) - return NonlinearExpr + ::Type{NonlinearExpr{V}}, +) where {V<:AbstractVariableRef} + return NonlinearExpr{V} end function _MA.promote_operation( ::Union{typeof(+),typeof(-),typeof(*)}, - ::Type{NonlinearExpr}, - ::Type{NonlinearExpr}, -) - return NonlinearExpr + ::Type{NonlinearExpr{V}}, + ::Type{NonlinearExpr{V}}, +) where {V<:AbstractVariableRef} + return NonlinearExpr{V} +end + +function _MA.promote_operation( + ::Union{typeof(+),typeof(-),typeof(*)}, + ::Type{NonlinearExpr{U}}, + ::Type{NonlinearExpr{V}}, +) where {U<:AbstractVariableRef,V<:AbstractVariableRef} + return error( + "Unable to promote two different types of nonlinear expression", + ) +end + +""" + UserDefinedFunction(head::Symbol) + +A struct representing a user-defined function named `head`. This function must +have already been added to the model using [`add_user_defined_function`](@ref) +or [`@register`](@ref). + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x) +x + +julia> f(x::Float64) = x^2 +f (generic function with 1 method) + +julia> ∇f(x::Float64) = 2 * x +∇f (generic function with 1 method) + +julia> ∇²f(x::Float64) = 2.0 +∇²f (generic function with 1 method) + +julia> add_user_defined_function(model, :foo, 1, f, ∇f, ∇²f) +UserDefinedFunction(:foo) + +julia> bar = UserDefinedFunction(:foo) +UserDefinedFunction(:foo) + +julia> @objective(model, Min, bar(x)) +foo(x) +``` +""" +struct UserDefinedFunction + head::Symbol +end + +(f::UserDefinedFunction)(args...) 
= NonlinearExpr(f.head, Any[a for a in args])
+
+"""
+    add_user_defined_function(
+        model::Model,
+        op::Symbol,
+        dim::Int,
+        f::Function,
+        [∇f::Function,]
+        [∇²f::Function,]
+    )
+
+Add a user-defined function with `dim` input arguments to `model` and associate
+it with the operator `op`.
+
+The function `f` evaluates the operator. The optional function `∇f` evaluates
+the first derivative, and the optional function `∇²f` evaluates the second
+derivative. `∇²f` may be provided only if `∇f` is also provided.
+
+## Example
+
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x)
+x
+
+julia> f(x::Float64) = x^2
+f (generic function with 1 method)
+
+julia> ∇f(x::Float64) = 2 * x
+∇f (generic function with 1 method)
+
+julia> ∇²f(x::Float64) = 2.0
+∇²f (generic function with 1 method)
+
+julia> foo = add_user_defined_function(model, :foo, 1, f, ∇f, ∇²f)
+UserDefinedFunction(:foo)
+
+julia> @objective(model, Min, foo(x))
+foo(x)
+```
+"""
+function add_user_defined_function(
+    model::Model,
+    op::Symbol,
+    dim::Int,
+    args::Vararg{Function,N},
+) where {N}
+    if !(1 <= N <= 3)
+        error(
+            "Unable to register user-defined function $op: invalid number of " *
+            "functions provided. Got $N, but expected 1 (if function only), " *
+            "2 (if function and gradient), or 3 (if function, gradient, and " *
+            "hessian provided)",
+        )
+    end
+    # TODO(odow): we could add other checks here, but we won't for now because
+    # down-stream solvers in MOI can add their own checks, and any solver using
+    # MOI.Nonlinear will automatically check for autodiff and common mistakes
+    # and throw a nice informative error.
+    MOI.set(model, MOI.UserDefinedFunction(op, dim), args)
+    return UserDefinedFunction(op)
+end
+
+"""
+    @register(model, operator, dim, args...)
+
+Register a user-defined function in `model`, and bind a new
+[`UserDefinedFunction`](@ref) to a variable called `operator` in the current
+scope.
+
+## Example
+
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x)
+x
+
+julia> f(x::Float64) = x^2
+f (generic function with 1 method)
+
+julia> ∇f(x::Float64) = 2 * x
+∇f (generic function with 1 method)
+
+julia> ∇²f(x::Float64) = 2.0
+∇²f (generic function with 1 method)
+
+julia> @register(model, foo, 1, f, ∇f, ∇²f)
+UserDefinedFunction(:foo)
+
+julia> @objective(model, Min, foo(x))
+foo(x)
+```
+
+## Non-macro version
+
+This macro is provided as helpful syntax that matches the style of the rest of
+the JuMP macros. However, you may also create user-defined functions outside the
+macros using [`add_user_defined_function`](@ref). For example:
+
+```julia
+julia> model = Model();
+
+julia> @register(model, f, 1, x -> x^2)
+UserDefinedFunction(:f)
+```
+is equivalent to
+```julia
+julia> model = Model();
+
+julia> f = add_user_defined_function(model, :f, 1, x -> x^2)
+UserDefinedFunction(:f)
+```
+"""
+macro register(model, op, args...)
+    rhs = Expr(
+        :call,
+        add_user_defined_function,
+        esc(model),
+        Meta.quot(op),
+        esc.(args)...,
+    )
+    return Expr(:(=), esc(op), rhs)
+end
diff --git a/src/operators.jl b/src/operators.jl
index b90f61edaef..2922714d0d7 100644
--- a/src/operators.jl
+++ b/src/operators.jl
@@ -204,12 +204,7 @@ function Base.:^(lhs::AbstractVariableRef, rhs::Integer)
     elseif rhs == 0
         return one(GenericQuadExpr{T,variable_ref_type(lhs)})
     else
-        error(
-            "Invalid integer exponent detected in expression `$lhs^$rhs`: " *
-            "supported exponents are 0, 1, or 2. 
" * - "If you are trying to build a nonlinear problem, use `x^$rhs.0` " * - "instead of `x^$rhs`, or use `x^Float64(y)` instead of `x^y`.", - ) + return NonlinearExpr(:^, Any[lhs, rhs]) end end @@ -221,13 +216,7 @@ function Base.:^(lhs::GenericAffExpr{T}, rhs::Integer) where {T} elseif rhs == 0 return one(GenericQuadExpr{T,variable_ref_type(lhs)}) else - error( - "Invalid integer exponent `$rhs` detected on an affine " * - "expression: supported exponents are 0, 1, or 2. " * - "If you are trying to build a nonlinear problem, use " * - "`aff^$rhs.0` instead of `aff^$rhs`, or use `aff^Float64(y)` " * - "instead of `aff^y`.", - ) + return NonlinearExpr(:^, Any[lhs, rhs]) end end diff --git a/src/quad_expr.jl b/src/quad_expr.jl index 4361de2d944..9d6fbf0ac49 100644 --- a/src/quad_expr.jl +++ b/src/quad_expr.jl @@ -53,6 +53,17 @@ end variable_ref_type(::Type{GenericQuadExpr{C,V}}) where {C,V} = V +function owner_model(x::GenericQuadExpr) + model = owner_model(x.aff) + if model !== nothing + return model + elseif !isempty(x.terms) + pair = first(keys(x.terms)) + return owner_model(pair.a) + end + return nothing +end + """ GenericQuadExpr( aff::GenericAffExpr{V,K}, diff --git a/src/variables.jl b/src/variables.jl index 56a3d7bce8c..f6788d37a44 100644 --- a/src/variables.jl +++ b/src/variables.jl @@ -2008,51 +2008,67 @@ end ### Error messages for common incorrect usages ### +function _logic_error_exception(sym::Symbol) + return ErrorException( + """ +Cannot evaluate `$(sym)` between a variable and a number. + +There are three common mistakes that lead to this. + + 1. You tried to write a constraint that depends on the value of a variable, for + example: + ```julia + model = Model() + @variable(model, x[1:2]) + if x[1] $(sym) 1 + @constraint(model, x[2] == 0) + end + ``` + You cannot write a model like this. You must formulate your problem as a + single optimization problem. Unfortunately, the way to do this is + problem-specific and depends on your choice of solver. You may be able to + use indicator constraints, or some other mixed-integer linear + reformulation. If stuck, post your problem on the community forum: + https://jump.dev/forum + + 2. You wrote a function that expected the value of a variable, but passed the + variable instead. For example: + ```julia + foo(x) = x $(sym) 1 ? 0 : 1 - x + model = Model() + @variable(model, x) + @expression(model, foo(x)) + ``` + To fix, create a nonlinear model with a user-defined function: + ```julia + foo(x) = x $(sym) 1 ? 0 : 1 - x + model = Model() + @register(model, my_foo, 1, foo) + @variable(model, x) + @expression(model, my_foo(x)) + ``` + + 3. You tried to create a logical nonlinear expression outside a macro, for + example: + ```julia + model = Model() + @variable(model, x) + expr = x $sym 1 + ``` + To fix, wrap the expression in the [`@expression`](@ref) macro: + ```julia + model = Model() + @variable(model, x) + expr = @expression(model, x $sym 1) + ``` + """, + ) +end + for sym in (:(<=), :(>=), :(<), :(>)) - msg = """Cannot evaluate `$(sym)` between a variable and a number. - - There are two common mistakes that lead to this. - - * You tried to write a constraint that depends on the value of a variable - - For example: - ```julia - model = Model() - @variable(model, x[1:2]) - if x[1] $(sym) 1 - @constraint(model, x[2] == 0) - end - ``` - - You cannot write a model like this. You must formulate your problem as a - single optimization problem. Unfortunately, the way to do this is - problem-specific and depends on your choice of solver. 
You may be able to - use indicator constraints, or some other mixed-integer linear - reformulation. If stuck, post your problem on the community forum: - https://jump.dev/forum - - * You wrote a function that expected the value of a variable, but it was - passed the variable instead - - For example: - ```julia - foo(x) = x $(sym) 1 ? 0 : 1 - x - model = Model() - @variable(model, x) - @objective(model, foo(x)) - ``` - - To fix this, create a nonlinear model with a user-defined function: - ```julia - foo(x) = x $(sym) 1 ? 0 : 1 - x - model = Model() - register(model, :foo, 1, foo; autodiff = true) - @variable(model, x) - @NLobjective(model, foo(x)) - ``` - """ + err = _logic_error_exception(sym) @eval begin - Base.$(sym)(::GenericVariableRef, ::Number) = error($(msg)) - Base.$(sym)(::Number, ::GenericVariableRef) = error($(msg)) + Base.$(sym)(::GenericVariableRef, ::Number) = throw($err) + Base.$(sym)(::Number, ::GenericVariableRef) = throw($err) end end diff --git a/test/perf/nonlinear_expr.jl b/test/perf/nonlinear_expr.jl deleted file mode 100644 index 4be70eba943..00000000000 --- a/test/perf/nonlinear_expr.jl +++ /dev/null @@ -1,440 +0,0 @@ -module NonlinearBenchmark - -using JuMP -import BenchmarkTools -import InfiniteOpt -import Ipopt -import Random -import Symbolics - -function benchmark_group() - lookup = Dict( - "perf_nl_" => "@NL", - "perf_nlexpr_" => "NonlinearExpr", - "perf_infopt_" => "InfiniteOpt", - "perf_symbolics_" => "Symbolics", - ) - suite = BenchmarkTools.BenchmarkGroup() - for v in values(lookup) - suite[v] = BenchmarkTools.BenchmarkGroup() - end - for name in names(@__MODULE__; all = true) - f = getfield(@__MODULE__, name) - for (k, v) in lookup - if startswith("$name", k) - fname = replace("$name", k => "") - suite[v][fname] = BenchmarkTools.@benchmarkable $f() - break - end - end - end - return suite -end - -function runbenchmarks() - suite = benchmark_group() - return BenchmarkTools.run(suite) -end - -# sum -# -# nlexpr is slower because it builds up the product via operator overloading, -# creating a lot of temporary objects. @NL gets to see the full +(args...) to it -# builds the expression in-place. -# -# We could fix this by implementing a n-argy method for +, but that gets -# difficult with method ambiguities. - -function perf_nl_sum() - model = Model() - @variable(model, x) - @NLobjective(model, Min, sum(x^i for i in 1:10_000)) - return -end - -function perf_nlexpr_sum() - model = Model() - @variable(model, x) - @objective(model, Min, sum(x^Float64(i) for i in 1:10_000)) - return -end - -function perf_infopt_sum() - model = InfiniteOpt.InfiniteModel() - @variable(model, x) - @objective(model, Min, sum(x^i for i in 1:10_000)) - return -end - -function perf_symbolics_sum() - Symbolics.@variables x - sum(x^i for i in 1:10_000) - return -end - -# prod -# -# nlexpr is slower because it builds up the product via operator overloading, -# creating a lot of temporary objects. @NL gets to see the full *(args...) to it -# builds the expression in-place. -# -# We could fix this by implementing a n-argy method for *, but that gets -# difficult with method ambiguities. 
- -function perf_nl_prod() - model = Model() - @variable(model, x) - @NLobjective(model, Min, prod(x^i for i in 1:10_000)) - return -end - -function perf_nlexpr_prod() - model = Model() - @variable(model, x) - @objective(model, Min, prod(x^Float64(i) for i in 1:10_000)) - return -end - -function perf_infopt_prod() - model = InfiniteOpt.InfiniteModel() - @variable(model, x) - @objective(model, Min, prod(x^i for i in 1:10_000)) - return -end - -function perf_symbolics_prod() - Symbolics.@variables x - prod(x^i for i in 1:10_000) - return -end - -# many_constraints - -function perf_nl_many_constraints() - model = Model() - @variable(model, x[1:10_000]) - @NLconstraint(model, [i = 1:10_000], sin(x[i]) <= cos(i)) - return -end - -function perf_nlexpr_many_constraints() - model = Model() - @variable(model, x[1:10_000]) - @constraint(model, [i = 1:10_000], sin(x[i]) <= cos(i)) - return -end - -function perf_infopt_many_constraints() - model = InfiniteOpt.InfiniteModel() - @variable(model, x[1:10_000]) - @constraint(model, [i = 1:10_000], sin(x[i]) <= cos(i)) - return -end - -function perf_symbolics_many_constraints() - Symbolics.@variables x[1:10_000] - [sin(x[i]) - cos(i) for i in 1:10_000] - return -end - -# mle - -function perf_nl_mle() - Random.seed!(1234) - n = 1_000 - data = randn(n) - model = Model(Ipopt.Optimizer) - set_silent(model) - @variable(model, μ, start = 0.0) - @variable(model, σ >= 0.0, start = 1.0) - @NLobjective( - model, - Max, - n / 2 * log(1 / (2 * π * σ^2)) - - sum((data[i] - μ)^2 for i in 1:n) / (2 * σ^2) - ) - optimize!(model) - return -end - -function perf_nlexpr_mle() - Random.seed!(1234) - n = 1_000 - data = randn(n) - model = Model(Ipopt.Optimizer) - set_silent(model) - @variable(model, μ, start = 0.0) - @variable(model, σ >= 0.0, start = 1.0) - @objective( - model, - Max, - n / 2 * log(1 / (2 * π * σ^2)) - - sum((data[i] - μ)^2 for i in 1:n) / (2 * σ^2) - ) - optimize!(model) - return -end - -function perf_infopt_mle() - Random.seed!(1234) - n = 1_000 - data = randn(n) - model = InfiniteOpt.InfiniteModel(Ipopt.Optimizer) - set_silent(model) - @variable(model, μ, start = 0.0) - @variable(model, σ >= 0.0, start = 1.0) - @objective( - model, - Max, - n / 2 * log(1 / (2 * π * σ^2)) - - sum((data[i] - μ)^2 for i in 1:n) / (2 * σ^2) - ) - optimize!(model) - return -end - -function perf_symbolics_mle() - Random.seed!(1234) - n = 1_000 - data = randn(n) - Symbolics.@variables μ σ - n / 2 * log(1 / (2 * π * σ^2)) - - sum((data[i] - μ)^2 for i in 1:n) / (2 * σ^2) - return -end - -# clnlbeam - -function perf_nl_clnlbeam() - N = 1000 - h = 1 / N - alpha = 350 - model = Model(Ipopt.Optimizer) - set_silent(model) - @variables(model, begin - -1 <= t[1:(N+1)] <= 1 - -0.05 <= x[1:(N+1)] <= 0.05 - u[1:(N+1)] - end) - @NLobjective( - model, - Min, - sum( - 0.5 * h * (u[i+1]^2 + u[i]^2) + - 0.5 * alpha * h * (cos(t[i+1]) + cos(t[i])) for i in 1:N - ), - ) - @NLconstraint( - model, - [i = 1:N], - x[i+1] - x[i] - 0.5 * h * (sin(t[i+1]) + sin(t[i])) == 0, - ) - @constraint( - model, - [i = 1:N], - t[i+1] - t[i] - 0.5 * h * u[i+1] - 0.5 * h * u[i] == 0, - ) - optimize!(model) - return -end - -function perf_nlexpr_clnlbeam() - N = 1000 - h = 1 / N - alpha = 350 - model = Model(Ipopt.Optimizer) - set_silent(model) - @variables(model, begin - -1 <= t[1:(N+1)] <= 1 - -0.05 <= x[1:(N+1)] <= 0.05 - u[1:(N+1)] - end) - @objective( - model, - Min, - sum( - 0.5 * h * (u[i+1]^2 + u[i]^2) + - 0.5 * alpha * h * (cos(t[i+1]) + cos(t[i])) for i in 1:N - ), - ) - @constraint( - model, - [i = 1:N], - 
x[i+1] - x[i] - 0.5 * h * (sin(t[i+1]) + sin(t[i])) == 0, - ) - @constraint( - model, - [i = 1:N], - t[i+1] - t[i] - 0.5 * h * u[i+1] - 0.5 * h * u[i] == 0, - ) - optimize!(model) - return -end - -function perf_infopt_clnlbeam() - N = 1000 - h = 1 / N - alpha = 350 - model = InfiniteOpt.InfiniteModel(Ipopt.Optimizer) - set_silent(model) - @variables(model, begin - -1 <= t[1:(N+1)] <= 1 - -0.05 <= x[1:(N+1)] <= 0.05 - u[1:(N+1)] - end) - @objective( - model, - Min, - sum( - 0.5 * h * (u[i+1]^2 + u[i]^2) + - 0.5 * alpha * h * (cos(t[i+1]) + cos(t[i])) for i in 1:N - ), - ) - @constraint( - model, - [i = 1:N], - x[i+1] - x[i] - 0.5 * h * (sin(t[i+1]) + sin(t[i])) == 0, - ) - @constraint( - model, - [i = 1:N], - t[i+1] - t[i] - 0.5 * h * u[i+1] - 0.5 * h * u[i] == 0, - ) - optimize!(model) - return -end - -# rosenbrock - -function perf_nl_rosenbrock() - model = Model(Ipopt.Optimizer) - set_silent(model) - @variable(model, x) - @variable(model, y) - @NLobjective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2) - optimize!(model) - return -end - -function perf_nlexpr_rosenbrock() - model = Model(Ipopt.Optimizer) - set_silent(model) - @variable(model, x) - @variable(model, y) - @objective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2) - optimize!(model) - return -end - -function perf_infopt_rosenbrock() - model = InfiniteOpt.InfiniteModel(Ipopt.Optimizer) - set_silent(model) - @variable(model, x) - @variable(model, y) - @objective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2) - optimize!(model) - return -end - -function perf_symbolics_rosenbrock() - Symbolics.@variables x y - (1 - x)^2 + 100 * (y - x^2)^2 - return -end - -# JuMP#2788 - -function perf_nl_jump_2788() - N = 400 - Random.seed!(1234) - k = N - n = 12 - p = rand(400:700, k, 1) - c1 = rand(100:200, k, n) - c2 = 0.9 .* c1 - b = rand(150:250, k, 1) - model = Model(Ipopt.Optimizer) - set_silent(model) - @variable(model, 0 <= x[i = 1:n] <= 1) - @variable(model, 0 <= var1 <= 1) - @variable(model, 0 <= var2 <= 1) - @variable(model, 0 <= var3 <= 1) - @objective(model, Max, var1 - var2 + var3) - @NLexpression(model, expr, sum(x[i] * p[i] for i in 1:n)) - @NLexpression(model, expr_c1[j = 1:k], sum(x[i] * c1[j, i] for i in 1:n)) - @NLexpression(model, expr_c2[j = 1:k], sum(x[i] * c2[j, i] for i in 1:n)) - @NLconstraint(model, expr == sum(b[j] / (1 + var1)^j for j in 1:k)) - @NLconstraint(model, expr == sum(expr_c1[j] / (1 + var2)^j for j in 1:k)) - @NLconstraint(model, expr == sum(expr_c2[j] / (1 + var3)^j for j in 1:k)) - @NLconstraint(model, [j = 1:k], expr_c1[j] >= b[j]) - optimize!(model) - return -end - -function perf_nlexpr_jump_2788() - N = 400 - Random.seed!(1234) - k = N - n = 12 - p = rand(400:700, k, 1) - c1 = rand(100:200, k, n) - c2 = 0.9 .* c1 - b = rand(150:250, k, 1) - model = Model(Ipopt.Optimizer) - set_silent(model) - @variable(model, 0 <= x[i = 1:n] <= 1) - @variable(model, 0 <= var1 <= 1) - @variable(model, 0 <= var2 <= 1) - @variable(model, 0 <= var3 <= 1) - @objective(model, Max, var1 - var2 + var3) - @expression(model, expr, sum(x[i] * p[i] for i in 1:n)) - @expression(model, expr_c1[j = 1:k], sum(x[i] * c1[j, i] for i in 1:n)) - @expression(model, expr_c2[j = 1:k], sum(x[i] * c2[j, i] for i in 1:n)) - @constraint(model, expr == sum(b[j] / (1 + var1)^Float64(j) for j in 1:k)) - @constraint( - model, - expr == sum(expr_c1[j] / (1 + var2)^Float64(j) for j in 1:k), - ) - @constraint( - model, - expr == sum(expr_c2[j] / (1 + var3)^Float64(j) for j in 1:k), - ) - @constraint(model, [j = 1:k], expr_c1[j] >= b[j]) - optimize!(model) - return 
-end - -function perf_infopt_jump_2788() - N = 400 - Random.seed!(1234) - k = N - n = 12 - p = rand(400:700, k, 1) - c1 = rand(100:200, k, n) - c2 = 0.9 .* c1 - b = rand(150:250, k, 1) - model = InfiniteOpt.InfiniteModel(Ipopt.Optimizer) - set_silent(model) - @variable(model, 0 <= x[i = 1:n] <= 1) - @variable(model, 0 <= var1 <= 1) - @variable(model, 0 <= var2 <= 1) - @variable(model, 0 <= var3 <= 1) - @objective(model, Max, var1 - var2 + var3) - @expression(model, expr, sum(x[i] * p[i] for i in 1:n)) - @expression(model, expr_c1[j = 1:k], sum(x[i] * c1[j, i] for i in 1:n)) - @expression(model, expr_c2[j = 1:k], sum(x[i] * c2[j, i] for i in 1:n)) - @constraint(model, expr == sum(b[j] / (1 + var1)^Float64(j) for j in 1:k)) - @constraint( - model, - expr == sum(expr_c1[j] / (1 + var2)^Float64(j) for j in 1:k), - ) - @constraint( - model, - expr == sum(expr_c2[j] / (1 + var3)^Float64(j) for j in 1:k), - ) - @constraint(model, [j = 1:k], expr_c1[j] >= b[j]) - optimize!(model) - return -end - -end # module diff --git a/test/perf/nonlinear_expr.py b/test/perf/nonlinear_expr.py deleted file mode 100644 index f60839949f9..00000000000 --- a/test/perf/nonlinear_expr.py +++ /dev/null @@ -1,143 +0,0 @@ -import pyomo.environ as pyo -from pyomo.opt import SolverFactory -import math -import random -import time - -def time_reps(func, iteration_limit=100, time_limit=10): - start = time.time() - reps = 0 - for i in range(0, iteration_limit): - func() - reps += 1 - if time.time() - start > time_limit: - break - end = time.time() - avg_ms = (end - start) / reps * 1000 - print("%s => %.3f ms" % (func.__name__, avg_ms)) - return - -def perf_pyomo_sum(): - model = pyo.ConcreteModel() - model.x = pyo.Var() - model.obj = pyo.Objective(expr=sum(model.x**i for i in range(10000))) - return - -def perf_pyomo_prod(): - model = pyo.ConcreteModel() - model.x = pyo.Var() - model.obj = pyo.Objective(expr=math.prod(model.x**i for i in range(10000))) - return - -def perf_pyomo_many_constraints(): - model = pyo.ConcreteModel() - model.X = pyo.RangeSet(0, 10000) - model.x = pyo.Var(model.X) - def constraint(model, i): - return pyo.sin(model.x[i]) <= pyo.cos(i) - model.c = pyo.Constraint(model.X, rule=constraint) - return - -def perf_pyomo_mle(): - model = pyo.ConcreteModel() - n = 1000 - model.x = pyo.Var(initialize=0.0) - model.y = pyo.Var(within=pyo.NonNegativeReals, initialize=1.0) - data = [random.random() for _ in range(n)] - model.obj = pyo.Objective( - expr = n / 2 * pyo.log(1 / (2 * math.pi * model.y**2)) - - sum((data[i] - model.x)**2 for i in range(n)) / (2 * model.y**2), - sense = pyo.maximize, - ) - opt = SolverFactory("ipopt") - opt.solve(model, tee=False) - return - -def perf_pyomo_clnlbeam(): - N = 1000 - h = 1 / N - alpha = 350 - model = pyo.ConcreteModel() - model.S = pyo.RangeSet(1,N+1) - model.S2 = pyo.RangeSet(1,N) - model.t = pyo.Var(model.S, bounds=(-1.0, 1.0)) - model.x = pyo.Var(model.S, bounds=(-0.05, 0.05)) - model.u = pyo.Var(model.S) - model.obj = pyo.Objective( - expr = sum( - 0.5 * h * (model.u[i+1]**2 + model.u[i]**2) + - 0.5 * alpha * h * (pyo.cos(model.t[i+1]) + pyo.cos(model.t[i])) - for i in model.S2 - ) - ) - def con_1(model, i): - return model.x[i+1] - model.x[i] - 0.5 * h * (pyo.sin(model.t[i+1]) + pyo.sin(model.t[i])) == 0 - model.c1 = pyo.Constraint(model.S2, rule=con_1) - def con_2(model, i): - return model.t[i+1] - model.t[i] - 0.5 * h * model.u[i+1] - 0.5 * h * model.u[i] == 0 - model.c2 = pyo.Constraint(model.S2, rule=con_2) - opt = SolverFactory("ipopt") - opt.solve(model, 
tee=False) - return - -def perf_pyomo_rosenbrock(): - model = pyo.ConcreteModel() - model.x = pyo.Var() - model.y = pyo.Var() - model.obj = pyo.Objective( - expr = (1 - model.x)**2 + 100 * (model.y - model.x**2)**2 - ) - opt = SolverFactory("ipopt") - opt.solve(model, tee=False) - return - -def perf_pyomo_jump_2788(): - N = 400 - k = N - n = 12 - p = [random.randint(400, 700) for _ in range(k)] - c1 = [[random.randint(100, 200) for _ in range(k)] for _ in range(n)] - b = [random.randint(150, 250) for _ in range(k)] - model = pyo.ConcreteModel() - model.S = pyo.RangeSet(1, n) - model.K = pyo.RangeSet(1, k) - model.x = pyo.Var(model.S, bounds=(0, 1)) - model.var1 = pyo.Var(bounds=(0, 1)) - model.var2 = pyo.Var(bounds=(0, 1)) - model.var3 = pyo.Var(bounds=(0, 1)) - model.obj = pyo.Objective( - expr=model.var1 - model.var2 + model.var3, - sense=pyo.maximize, - ) - model.expr = sum(model.x[i] * p[i-1] for i in model.S) - def expr_c1(model, j): - return sum(model.x[i] * c1[i-1][j-1] for i in model.S) - def expr_c2(model, j): - return sum(model.x[i] * 0.9 * c1[i-1][j-1] for i in model.S) - model.con1 = pyo.Constraint( - expr=model.expr==sum(b[j-1]/(1+model.var1)**j for j in model.K), - ) - model.con2 = pyo.Constraint( - expr=model.expr==sum(expr_c1(model, j)/(1+model.var2)**j for j in model.K), - ) - model.con3 = pyo.Constraint( - expr=model.expr==sum(expr_c2(model, j)/(1+model.var3)**j for j in model.K) - ) - def con_4(model, j): - return expr_c1(model, j) >= b[j-1] - model.con4 = pyo.Constraint(model.K, rule=con_4) - opt = SolverFactory("ipopt") - opt.solve(model, tee=False) - return - -if __name__ == "__main__": - for f in [ - perf_pyomo_sum, - perf_pyomo_prod, - perf_pyomo_many_constraints, - perf_pyomo_mle, - perf_pyomo_clnlbeam, - perf_pyomo_rosenbrock, - perf_pyomo_jump_2788, - ]: - time_reps(f) diff --git a/test/runtests.jl b/test/runtests.jl index d6876c9b1c6..30f203a8108 100755 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -8,9 +8,6 @@ # See https://github.com/jump-dev/JuMP.jl ############################################################################# -import Pkg -Pkg.pkg"add MathOptInterface#od/nlp-expr" - import JuMP import Test diff --git a/test/test_macros.jl b/test/test_macros.jl index 51729c34c25..993fe62a701 100644 --- a/test/test_macros.jl +++ b/test/test_macros.jl @@ -616,13 +616,6 @@ function test_Nested_tuple_destructuring() return end -function test_Error_on_unexpected_comparison() - m = Model() - @variable(m, x) - @test_throws ErrorException @expression(m, x <= 1) - return -end - function test_Lookup_in_model_scope_variable() model = Model() @variable(model, x) diff --git a/test/test_nlp.jl b/test/test_nlp.jl index bf20db8903b..c1279f18185 100644 --- a/test/test_nlp.jl +++ b/test/test_nlp.jl @@ -1601,4 +1601,52 @@ function test_parse_expression_quadexpr_multivariate_sum() return end +function test_parse_expression_nonlinearexpr_call() + model = Model() + @variable(model, x) + @variable(model, y) + f = NonlinearExpr(:ifelse, Any[x, 0, y]) + @NLexpression(model, ref, f) + nlp = nonlinear_model(model) + expr = :(ifelse($x, 0, $y)) + @test MOI.Nonlinear.parse_expression(nlp, expr) == nlp[index(ref)] + return +end + +function test_parse_expression_nonlinearexpr_or() + model = Model() + @variable(model, x) + @variable(model, y) + f = NonlinearExpr(:||, Any[x, y]) + @NLexpression(model, ref, f) + nlp = nonlinear_model(model) + expr = :($x || $y) + @test MOI.Nonlinear.parse_expression(nlp, expr) == nlp[index(ref)] + return +end + +function 
test_parse_expression_nonlinearexpr_and() + model = Model() + @variable(model, x) + @variable(model, y) + f = NonlinearExpr(:&&, Any[x, y]) + @NLexpression(model, ref, f) + nlp = nonlinear_model(model) + expr = :($x && $y) + @test MOI.Nonlinear.parse_expression(nlp, expr) == nlp[index(ref)] + return +end + +function test_parse_expression_nonlinearexpr_unsupported() + model = Model() + @variable(model, x) + @variable(model, y) + f = NonlinearExpr(:foo, Any[x, y]) + @test_throws( + MOI.UnsupportedNonlinearOperator, + @NLexpression(model, ref, f), + ) + return +end + end diff --git a/test/test_nlp_expr.jl b/test/test_nlp_expr.jl index 95a798fefab..7e62f951a68 100644 --- a/test/test_nlp_expr.jl +++ b/test/test_nlp_expr.jl @@ -8,160 +8,217 @@ module TestNLPExpr using JuMP using Test -function test_univariate_operators() - model = Model() +function test_extension_univariate_operators( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, x) for f in MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS if f in (:+, :-, :abs2) op = getfield(Base, f) - @test op(sin(x)) isa NonlinearExpr + @test op(sin(x)) isa NonlinearExpr{VariableRefType} elseif isdefined(Base, f) op = getfield(Base, f) - @test op(x) isa NonlinearExpr + @test op(x) isa NonlinearExpr{VariableRefType} elseif isdefined(MOI.Nonlinear.SpecialFunctions, f) op = getfield(MOI.Nonlinear.SpecialFunctions, f) - @test op(x) isa NonlinearExpr + @test op(x) isa NonlinearExpr{VariableRefType} end end return end -function test_binary_operators() - model = Model() +function test_extension_binary_operators( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, x) num, aff, quad, nlp = 1.0, 1.0 + x, x^2, sin(x) for op in (+, -, *, /), a in (num, x, aff, quad, nlp) - @test op(a, nlp) isa NonlinearExpr - @test op(nlp, a) isa NonlinearExpr + @test op(a, nlp) isa NonlinearExpr{VariableRefType} + @test op(nlp, a) isa NonlinearExpr{VariableRefType} end for op in (*, /), a in (x, aff) - @test op(a, quad) isa NonlinearExpr - @test op(quad, a) isa NonlinearExpr + @test op(a, quad) isa NonlinearExpr{VariableRefType} + @test op(quad, a) isa NonlinearExpr{VariableRefType} end for a in (num, x, aff, quad), b in (x, aff, quad) - @test /(a, b) isa NonlinearExpr + @test /(a, b) isa NonlinearExpr{VariableRefType} end return end -function test_objective() - model = Model() +function test_extension_objective( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, x) @objective(model, Min, 2.0 * sin(x)^2 + cos(x) / x) - @test objective_function(model) isa NonlinearExpr + @test objective_function(model) isa NonlinearExpr{VariableRefType} return end -function test_expression() - model = Model() +function test_extension_expression( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, x) @variable(model, y[1:3]) - @test string(@expression(model, *(y...))) == "*(y[1]*y[2], y[3])" + @test string(@expression(model, *(y...))) == "(y[1]*y[2] * y[3])" @test string(@expression(model, sin(x))) == "sin(x)" - @test string(@expression(model, 2^x)) == "^(2.0, x)" - @test string(@expression(model, x^x)) == "^(x, x)" - @test string(@expression(model, sin(x)^2)) == "^(sin(x), 2.0)" - @test string(@expression(model, sin(x)^2.0)) == "^(sin(x), 2.0)" - @test string(@expression(model, 2 * sin(x)^2.0)) == "*(2.0, ^(sin(x), 2.0))" - @test string(@expression(model, 1 + sin(x))) == "+(1.0, sin(x))" - @test string(@expression(model, 1 + 2 * 
sin(x))) == "+(1.0, *(2.0, sin(x)))" + @test string(@expression(model, 2^x)) == "(2.0 ^ x)" + @test string(@expression(model, x^x)) == "(x ^ x)" + @test string(@expression(model, sin(x)^2)) == "(sin(x) ^ 2.0)" + @test string(@expression(model, sin(x)^2.0)) == "(sin(x) ^ 2.0)" + @test string(@expression(model, 2 * sin(x)^2.0)) == "(2.0 * (sin(x) ^ 2.0))" + @test string(@expression(model, 1 + sin(x))) == "(1.0 + sin(x))" + @test string(@expression(model, 1 + 2 * sin(x))) == "(1.0 + (2.0 * sin(x)))" @test string(@expression(model, 2.0 * sin(x)^2 + cos(x) / x)) == - "+(*(2.0, ^(sin(x), 2.0)), /(cos(x), x))" + "((2.0 * (sin(x) ^ 2.0)) + (cos(x) / x))" @test string(@expression(model, 2.0 * sin(x)^2 - cos(x) / x)) == - "-(*(2.0, ^(sin(x), 2.0)), /(cos(x), x))" + "((2.0 * (sin(x) ^ 2.0)) - (cos(x) / x))" return end -function test_flatten_nary() - model = Model() +function test_extension_flatten_nary( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, x) - @test string(zero(NonlinearExpr) + 1) == "+(+(0.0), 1.0)" - @test string(zero(NonlinearExpr) + x) == "+(+(0.0), x)" - @test string(sin(x) + sin(x) + 1) == "+(+(sin(x), sin(x)), 1.0)" - @test string(sin(x) + sin(x) + x) == "+(+(sin(x), sin(x)), x)" - @test string(sin(x) * sin(x) * 1) == "*(*(sin(x), sin(x)), 1.0)" - @test string(sin(x) * sin(x) * x) == "*(*(sin(x), sin(x)), x)" + expr_plus = NonlinearExpr{VariableRefType}(:+, Any[x]) + expr_mult = NonlinearExpr{VariableRefType}(:*, Any[x]) + expr_sin = NonlinearExpr{VariableRefType}(:sin, Any[x]) + to_string(x) = string(flatten(x)) + @test to_string(+(expr_plus, 1)) == "(x + 1.0)" + @test to_string(+(1, expr_plus)) == "(1.0 + x)" + @test to_string(+(expr_plus, x)) == "(x + x)" + @test to_string(+(expr_sin, x)) == "(sin(x) + x)" + @test to_string(+(x, expr_plus)) == "(x + x)" + @test to_string(+(x, expr_sin)) == "(x + sin(x))" + @test to_string(+(expr_plus, expr_plus)) == "(x + x)" + @test to_string(+(expr_plus, expr_sin)) == "(x + sin(x))" + @test to_string(+(expr_sin, expr_plus)) == "(sin(x) + x)" + @test to_string(+(expr_sin, expr_sin)) == "(sin(x) + sin(x))" + @test to_string(*(expr_mult, 2)) == "(x * 2.0)" + @test to_string(*(2, expr_mult)) == "(2.0 * x)" + @test to_string(*(expr_mult, x)) == "(x * x)" + @test to_string(*(expr_sin, x)) == "(sin(x) * x)" + @test to_string(*(x, expr_mult)) == "(x * x)" + @test to_string(*(x, expr_sin)) == "(x * sin(x))" + @test to_string(*(expr_mult, expr_mult)) == "(x * x)" + @test to_string(*(expr_mult, expr_sin)) == "(x * sin(x))" + @test to_string(*(expr_sin, expr_mult)) == "(sin(x) * x)" + @test to_string(*(expr_sin, expr_sin)) == "(sin(x) * sin(x))" + @test to_string(sin(+(expr_plus, 1))) == "sin((x + 1.0))" + @test to_string(sin(*(expr_mult, expr_mult))) == "sin((x * x))" return end -function test_zero_one() - @test string(zero(NonlinearExpr)) == "+(0.0)" - @test string(one(NonlinearExpr)) == "+(1.0)" +function test_extension_zero_one( + ModelType = Model, + VariableRefType = VariableRef, +) + @test string(zero(NonlinearExpr{VariableRefType})) == "+(0.0)" + @test string(one(NonlinearExpr{VariableRefType})) == "+(1.0)" return end -function test_latex() - model = Model() +function test_extension_latex(ModelType = Model, VariableRefType = VariableRef) + model = ModelType() @variable(model, x) - @test function_string(MIME("text/latex"), sin(x)) == "\\textsf{sin(x)}" + @test function_string(MIME("text/latex"), sin(x)) == + raw"\textsf{sin}\left({x}\right)" @test function_string(MIME("text/plain"), sin(x)) == 
"sin(x)" + @expression(model, g, ifelse(x > 0, sin(x), x + cos(x)^2)) + @test function_string(MIME("text/latex"), g) == + raw"\textsf{ifelse}\left({\left({x} > {0}\right)}, {\textsf{sin}\left({x}\right)}, {\left({x} + {\left({\textsf{cos}\left({x}\right)} ^ {2.0}\right)}\right)}\right)" return end -function test_expression_addmul() - model = Model() +function test_extension_expression_addmul( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, x) - @test string(@expression(model, x + 3 * sin(x))) == "+(x, *(3.0, sin(x)))" + @test string(@expression(model, x + 3 * sin(x))) == "(x + (3.0 * sin(x)))" @test string(@expression(model, 2 * x + 3 * sin(x))) == - "+(2 x, *(3.0, sin(x)))" + "(2 x + (3.0 * sin(x)))" @test string(@expression(model, x^2 + 3 * sin(x))) == - "+($(x^2), *(3.0, sin(x)))" + "($(x^2) + (3.0 * sin(x)))" @test string(@expression(model, sin(x) + 3 * sin(x))) == - "+(sin(x), *(3.0, sin(x)))" - @test string(@expression(model, sin(x) + 3 * x)) == "+(sin(x), 3 x)" + "(sin(x) + (3.0 * sin(x)))" + @test string(@expression(model, sin(x) + 3 * x)) == "(sin(x) + 3 x)" @test string(@expression(model, sin(x) + 3 * x * x)) == - "+(sin(x), 3 $(x^2))" + "(sin(x) + 3 $(x^2))" return end -function test_expression_submul() - model = Model() +function test_extension_expression_submul( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, x) - @test string(@expression(model, x - 3 * sin(x))) == "-(x, *(3.0, sin(x)))" + @test string(@expression(model, x - 3 * sin(x))) == "(x - (3.0 * sin(x)))" @test string(@expression(model, 2 * x - 3 * sin(x))) == - "-(2 x, *(3.0, sin(x)))" + "(2 x - (3.0 * sin(x)))" @test string(@expression(model, x^2 - 3 * sin(x))) == - "-($(x^2), *(3.0, sin(x)))" + "($(x^2) - (3.0 * sin(x)))" @test string(@expression(model, sin(x) - 3 * sin(x))) == - "-(sin(x), *(3.0, sin(x)))" - @test string(@expression(model, sin(x) - 3 * x)) == "-(sin(x), 3 x)" + "(sin(x) - (3.0 * sin(x)))" + @test string(@expression(model, sin(x) - 3 * x)) == "(sin(x) - 3 x)" @test string(@expression(model, sin(x) - 3 * x * x)) == - "-(sin(x), 3 $(x^2))" + "(sin(x) - 3 $(x^2))" return end -function test_aff_expr_convert() - model = Model() +function test_extension_aff_expr_convert( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, x) - _to_string(x) = string(convert(NonlinearExpr, x)) + _to_string(x) = string(convert(NonlinearExpr{VariableRefType}, x)) @test _to_string(AffExpr(0.0)) == "0.0" @test _to_string(AffExpr(1.0)) == "1.0" - @test _to_string(x + 1) == "+(x, 1.0)" - @test _to_string(2x + 1) == "+(*(2.0, x), 1.0)" - @test _to_string(2x) == "*(2.0, x)" + @test _to_string(x + 1) == "(x + 1.0)" + @test _to_string(2x + 1) == "((2.0 * x) + 1.0)" + @test _to_string(2x) == "(2.0 * x)" return end -function test_quad_expr_convert() - model = Model() +function test_extension_quad_expr_convert( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, x) - _to_string(x) = string(convert(NonlinearExpr, x)) + _to_string(x) = string(convert(NonlinearExpr{VariableRefType}, x)) @test _to_string(QuadExpr(AffExpr(0.0))) == "0.0" @test _to_string(QuadExpr(AffExpr(1.0))) == "1.0" - @test _to_string(x^2 + 1) == "+(*(x, x), 1.0)" - @test _to_string(2x^2 + 1) == "+(*(2.0, x, x), 1.0)" - @test _to_string(2x^2) == "*(2.0, x, x)" - @test _to_string(x^2 + x + 1) == "+(x, *(x, x), 1.0)" - @test _to_string(2x^2 + x + 1) == "+(x, *(2.0, x, x), 1.0)" - @test 
_to_string(2x^2 + x) == "+(x, *(2.0, x, x))" - @test _to_string(x^2 + 2x + 1) == "+(*(2.0, x), *(x, x), 1.0)" - @test _to_string(2x^2 + 2x + 1) == "+(*(2.0, x), *(2.0, x, x), 1.0)" - @test _to_string(2x^2 + 2x) == "+(*(2.0, x), *(2.0, x, x))" + @test _to_string(x^2 + 1) == "((x * x) + 1.0)" + @test _to_string(2x^2 + 1) == "((2.0 * x * x) + 1.0)" + @test _to_string(2x^2) == "(2.0 * x * x)" + @test _to_string(x^2 + x + 1) == "(x + (x * x) + 1.0)" + @test _to_string(2x^2 + x + 1) == "(x + (2.0 * x * x) + 1.0)" + @test _to_string(2x^2 + x) == "(x + (2.0 * x * x))" + @test _to_string(x^2 + 2x + 1) == "((2.0 * x) + (x * x) + 1.0)" + @test _to_string(2x^2 + 2x + 1) == "((2.0 * x) + (2.0 * x * x) + 1.0)" + @test _to_string(2x^2 + 2x) == "((2.0 * x) + (2.0 * x * x))" return end -function test_constraint_name() - model = Model() +function test_extension_constraint_name( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, x) @constraint(model, c, sin(x) <= 1) @test name(c) == "c" @@ -171,8 +228,11 @@ function test_constraint_name() return end -function test_constraint_lessthan() - model = Model() +function test_extension_constraint_lessthan( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, x) @constraint(model, c, 2.0 * sin(x)^2 + cos(x) / x <= 1) obj = constraint_object(c) @@ -181,8 +241,11 @@ function test_constraint_lessthan() return end -function test_constraint_greaterthan() - model = Model() +function test_extension_constraint_greaterthan( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, x) @constraint(model, c, 2.0 * sin(x)^2 + cos(x) / x >= 1) obj = constraint_object(c) @@ -191,8 +254,11 @@ function test_constraint_greaterthan() return end -function test_constraint_equalto() - model = Model() +function test_extension_constraint_equalto( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, x) @constraint(model, c, 2.0 * sin(x)^2 + cos(x) / x == 1) obj = constraint_object(c) @@ -201,8 +267,11 @@ function test_constraint_equalto() return end -function test_constraint_interval() - model = Model() +function test_extension_constraint_interval( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, x) @constraint(model, c, 0 <= 2.0 * sin(x)^2 + cos(x) / x <= 1) obj = constraint_object(c) @@ -215,18 +284,21 @@ function test_user_defined_function_overload() model = Model() @variable(model, x) f(x::Real) = x^2 - f(x::AbstractJuMPScalar) = NonlinearExpr(:f, x) + f(x::AbstractJuMPScalar) = NonlinearExpr{VariableRef}(:f, x) register(model, :f, 1, f; autodiff = true) @test string(@expression(model, f(x))) == "f(x)" - @test string(f(x) + f(x)) == "+(f(x), f(x))" + @test string(f(x) + f(x)) == "(f(x) + f(x))" return end -function test_nonlinear_matrix_algebra() - model = Model() +function test_extension_nonlinear_matrix_algebra( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, X[1:3, 1:3], Symmetric) @objective(model, Max, sum(X^4 .- X^3)) - @test objective_function(model) isa NonlinearExpr + @test objective_function(model) isa NonlinearExpr{VariableRefType} return end @@ -234,14 +306,17 @@ end This test checks that we can work with expressions of arbitrary depth. Don't use recursion! 
""" -function test_recursion_stackoverflow() - model = Model() +function test_extension_recursion_stackoverflow( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() @variable(model, x) expr = sin(x) for _ in 1:20_000 expr = sin(expr) end - @test @objective(model, Min, expr) isa NonlinearExpr + @test @objective(model, Min, expr) isa NonlinearExpr{VariableRefType} @test string(expr) isa String return end @@ -252,7 +327,7 @@ function test_nlparameter_interaction() @NLparameter(model, p == 1) e = x + p @test e isa NonlinearExpr - @test string(e) == "+(x, $p)" + @test string(e) == "(x + $p)" return end @@ -262,7 +337,7 @@ function test_nlexpression_interaction() @NLexpression(model, expr, sin(x)) e = x + expr @test e isa NonlinearExpr - @test string(e) == "+(x, $expr)" + @test string(e) == "(x + $expr)" return end @@ -296,7 +371,7 @@ function test_jump_function_nonlinearexpr() @NLexpression(model, expr1, sin(p + x)) @NLexpression(model, expr2, sin(expr1)) nlp = nonlinear_model(model) - @test string(jump_function(model, nlp[index(expr1)])) == "sin(+($p, $x))" + @test string(jump_function(model, nlp[index(expr1)])) == "sin(($p + $x))" @test string(jump_function(model, nlp[index(expr2)])) == "sin($expr1)" return end @@ -312,10 +387,13 @@ function test_constraint_object() return end -function test_expr_mle() +function test_extension_expr_mle( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() data = [1.0, 2.0, 4.0, 8.0] n = length(data) - model = Model() @variable(model, x) @variable(model, y) obj = @expression( @@ -324,7 +402,206 @@ function test_expr_mle() sum((data[i] - x)^2 for i in 1:n) / (2 * y^2) ) @test string(obj) == - "-(*(2.0, log(/(1.0, 2 $(y^2)))), /(4 $(x^2) - 30 x + 85, 2 $(y^2)))" + "((2.0 * log((1.0 / 2 $(y^2)))) - (4 $(x^2) - 30 x + 85 / 2 $(y^2)))" + return +end + +function test_extension_nl_macro( + ModelType = Model, + VariableRefType = VariableRef, +) + model = ModelType() + @variable(model, x) + @test isequal_canonical( + @expression(model, ifelse(x, 1, 2)), + NonlinearExpr(:ifelse, Any[x, 1, 2]), + ) + @test isequal_canonical( + @expression(model, x || 1), + NonlinearExpr(:||, Any[x, 1]), + ) + @test isequal_canonical( + @expression(model, x && 1), + NonlinearExpr(:&&, Any[x, 1]), + ) + @test isequal_canonical( + @expression(model, x < 0), + NonlinearExpr(:<, Any[x, 0]), + ) + @test isequal_canonical( + @expression(model, x > 0), + NonlinearExpr(:>, Any[x, 0]), + ) + @test isequal_canonical( + @expression(model, x <= 0), + NonlinearExpr(:<=, Any[x, 0]), + ) + @test isequal_canonical( + @expression(model, x >= 0), + NonlinearExpr(:>=, Any[x, 0]), + ) + @test isequal_canonical( + @expression(model, x == 0), + NonlinearExpr(:(==), Any[x, 0]), + ) + @test isequal_canonical( + @expression(model, 0 < x <= 1), + NonlinearExpr( + :&&, + Any[@expression(model, 0 < x), @expression(model, x <= 1)], + ), + ) + @test isequal_canonical( + @expression(model, ifelse(x > 0, x^2, sin(x))), + NonlinearExpr(:ifelse, Any[@expression(model, x > 0), x^2, sin(x)]), + ) + return +end + +function test_register_univariate() + model = Model() + @variable(model, x) + @register(model, f, 1, x -> x^2) + @test isequal_canonical(@expression(model, f(x)), f(x)) + @test isequal_canonical(f(x), NonlinearExpr(:f, Any[x])) + attrs = MOI.get(model, MOI.ListOfModelAttributesSet()) + @test MOI.UserDefinedFunction(:f, 1) in attrs + return +end + +function test_register_univariate_gradient() + model = Model() + @variable(model, x) + @register(model, f, 1, x -> 
x^2, x -> 2 * x)
+    @test isequal_canonical(@expression(model, f(x)), f(x))
+    @test isequal_canonical(f(x), NonlinearExpr(:f, Any[x]))
+    attrs = MOI.get(model, MOI.ListOfModelAttributesSet())
+    @test MOI.UserDefinedFunction(:f, 1) in attrs
+    return
+end
+
+function test_register_univariate_gradient_hessian()
+    model = Model()
+    @variable(model, x)
+    @register(model, f, 1, x -> x^2, x -> 2 * x, x -> 2.0)
+    @test isequal_canonical(@expression(model, f(x)), f(x))
+    @test isequal_canonical(f(x), NonlinearExpr(:f, Any[x]))
+    attrs = MOI.get(model, MOI.ListOfModelAttributesSet())
+    @test MOI.UserDefinedFunction(:f, 1) in attrs
+    return
+end
+
+function test_register_multivariate()
+    model = Model()
+    @variable(model, x[1:2])
+    f = (x...) -> sum(x .^ 2)
+    @register(model, foo, 2, f)
+    @test isequal_canonical(@expression(model, foo(x...)), foo(x...))
+    @test isequal_canonical(foo(x...), NonlinearExpr(:foo, Any[x...]))
+    attrs = MOI.get(model, MOI.ListOfModelAttributesSet())
+    @test MOI.UserDefinedFunction(:foo, 2) in attrs
+    return
+end
+
+function test_register_multivariate_gradient()
+    model = Model()
+    @variable(model, x[1:2])
+    f = (x...) -> sum(x .^ 2)
+    ∇f = (g, x...) -> (g .= 2 .* x)
+    @register(model, foo, 2, f, ∇f)
+    @test isequal_canonical(@expression(model, foo(x...)), foo(x...))
+    @test isequal_canonical(foo(x...), NonlinearExpr(:foo, Any[x...]))
+    attrs = MOI.get(model, MOI.ListOfModelAttributesSet())
+    @test MOI.UserDefinedFunction(:foo, 2) in attrs
+    return
+end
+
+function test_register_multivariate_gradient_hessian()
+    model = Model()
+    @variable(model, x[1:2])
+    f = (x...) -> sum(x .^ 2)
+    ∇f = (g, x...) -> (g .= 2 .* x)
+    ∇²f = (H, x...) -> begin
+        for i in 1:2
+            H[i, i] = 2.0
+        end
+    end
+    @register(model, foo, 2, f, ∇f, ∇²f)
+    @test isequal_canonical(@expression(model, foo(x...)), foo(x...))
+    @test isequal_canonical(foo(x...), NonlinearExpr(:foo, Any[x...]))
+    attrs = MOI.get(model, MOI.ListOfModelAttributesSet())
+    @test MOI.UserDefinedFunction(:foo, 2) in attrs
+    return
+end
+
+function test_register_errors()
+    model = Model()
+    @test_throws(
+        ErrorException(
+            "Unable to register user-defined function foo: invalid number of " *
+            "functions provided. Got 0, but expected 1 (if function only), " *
+            "2 (if function and gradient), or 3 (if function, gradient, and " *
+            "hesssian provided)",
+        ),
+        @register(model, foo, 2),
+    )
+    return
+end
+
+function test_expression_no_variable()
+    head, args = :sin, Any[1]
+    @test_throws(
+        ErrorException(
+            "Unable to create a nonlinear expression because it did not " *
+            "contain any JuMP scalars. 
head = $head, args = $args.", + ), + NonlinearExpr(head, args), + ) + return +end + +function test_value_expression() + model = Model() + @variable(model, x) + f = x -> 1.1 + @test value(f, sin(x)) ≈ sin(1.1) + @test value(f, sin(x) + cos(x)) ≈ sin(1.1) + cos(1.1) + @test value(f, x^1.3 / x) ≈ 1.1^1.3 / 1.1 + @test value(f, @expression(model, ifelse(x > 1, 1, 2))) ≈ 1 + @test value(f, @expression(model, ifelse(x < 1, 1, 2))) ≈ 2 + @test value(f, @expression(model, ifelse(x < 1 || x > 2, 1, 2))) ≈ 2 + @test value(f, @expression(model, ifelse(x < 1 && x > 2, 1, 2))) ≈ 2 + @test value(f, sin(x + 1)) ≈ sin(1.1 + 1) + @test value(f, sin(x^2 + x + 1)) ≈ sin(1.1^2 + 1.1 + 1) + foo(x) = (x - 1)^2 + bar(x, y) = sqrt(x - y) + @register(model, my_foo, 1, foo) + @register(model, my_bar, 2, bar) + @test value(f, my_foo(x)) ≈ (1.1 - 1)^2 + @test value(f, my_foo(x + 1)) ≈ (1.1 + 1 - 1)^2 + @test value(f, my_foo(x^2 + 1)) ≈ (1.1^2 + 1 - 1)^2 + @test value(f, my_foo(x^2 + x + 1)) ≈ (1.1^2 + 1.1 + 1 - 1)^2 + y = QuadExpr(x + 1) + @test value(f, my_foo(y)) ≈ (value(f, y) - 1)^2 + @test value(f, my_bar(2.2, x)) ≈ sqrt(2.2 - 1.1) + bad_udf = UserDefinedFunction(:bad_udf) + @test_throws( + ErrorException( + "Unable to evaluate nonlinear operator bad_udf because it is not " * + "registered", + ), + value(f, bad_udf(x)), + ) + return +end + +function test_show_nonlinear_model() + model = Model() + @variable(model, x >= -1) + @objective(model, Min, exp(x)) + @constraint(model, sin(x) <= 0) + str = sprint(show, model) + @test occursin("NonlinearExpr{", str) return end diff --git a/test/test_operator.jl b/test/test_operator.jl index dbc5a1d7946..1b395b6866e 100644 --- a/test/test_operator.jl +++ b/test/test_operator.jl @@ -106,10 +106,11 @@ function test_extension_broadcast_division_error( copy(B.rowval), vec(x), ) - @test A ./ x isa Matrix{NonlinearExpr} - @test B ./ x isa SparseArrays.SparseMatrixCSC{NonlinearExpr,Int} - @test A ./ y isa SparseArrays.SparseMatrixCSC{NonlinearExpr,Int} - @test B ./ y isa SparseArrays.SparseMatrixCSC{NonlinearExpr,Int} + NonlinearExprType = NonlinearExpr{VariableRefType} + @test A ./ x isa Matrix{NonlinearExprType} + @test B ./ x isa SparseArrays.SparseMatrixCSC{NonlinearExprType,Int} + @test A ./ y isa SparseArrays.SparseMatrixCSC{NonlinearExprType,Int} + @test B ./ y isa SparseArrays.SparseMatrixCSC{NonlinearExprType,Int} # TODO: Refactor to avoid calling the internal JuMP function # `_densify_with_jump_eltype`. 
#z = _densify_with_jump_eltype((2 .* y) ./ 3)
@@ -335,17 +336,17 @@ function test_extension_basic_operators_number(
     @test_expression_with_string 4.13 + w "w + 4.13"
     @test_expression_with_string 3.16 - w "-w + 3.16"
     @test_expression_with_string 5.23 * w "5.23 w"
-    @test_expression_with_string 2.94 / w "/(2.94, w)"
+    @test_expression_with_string 2.94 / w "(2.94 / w)"
     # 1-3 Number--AffExpr
     @test_expression_with_string 1.5 + aff "7.1 x + 4"
     @test_expression_with_string 1.5 - aff "-7.1 x - 1"
     @test_expression_with_string 2 * aff "14.2 x + 5"
-    @test_expression_with_string 2 / aff "/(2.0, 7.1 x + 2.5)"
+    @test_expression_with_string 2 / aff "(2.0 / 7.1 x + 2.5)"
     # 1-4 Number--QuadExpr
     @test_expression_with_string 1.5 + q "2.5 y*z + 7.1 x + 4"
     @test_expression_with_string 1.5 - q "-2.5 y*z - 7.1 x - 1"
     @test_expression_with_string 2 * q "5 y*z + 14.2 x + 5"
-    @test_expression_with_string 2 / q "/(2.0, 2.5 y*z + 7.1 x + 2.5)"
+    @test_expression_with_string 2 / q "(2.0 / 2.5 y*z + 7.1 x + 2.5)"
     return
 end
@@ -373,30 +374,30 @@ function test_extension_basic_operators_variable(
     @test_expression_with_string w / T(2) "0.5 w"
     @test w == w
     @test_expression_with_string x * y - 1 "x*y - 1"
-    @test_expression_with_string x^2 "x²"
-    @test_expression_with_string x^1 "x"
-    @test_expression_with_string x^0 "1"
-    @test_throws ErrorException x^3
-    @test_expression_with_string x^(T(15) / T(10)) "^(x, 1.5)"
+    @test_expression_with_string(x^2, "x²", inferrable = false)
+    @test_expression_with_string(x^1, "x", inferrable = false)
+    @test_expression_with_string(x^0, "1", inferrable = false)
+    @test_expression_with_string(x^3, "(x ^ 3)", inferrable = false)
+    @test_expression_with_string x^(T(15) / T(10)) "(x ^ 1.5)"
     # 2-2 Variable--Variable
     @test_expression_with_string w + x "w + x"
     @test_expression_with_string w - x "w - x"
     @test_expression_with_string w * x "w*x"
     @test_expression_with_string x - x "0"
-    @test_expression_with_string w / x "/(w, x)"
+    @test_expression_with_string w / x "(w / x)"
     @test_expression_with_string y * z - x "y*z - x"
     # 2-3 Variable--AffExpr
     @test_expression_with_string z + aff "z + 7.1 x + 2.5"
     @test_expression_with_string z - aff "z - 7.1 x - 2.5"
     @test_expression_with_string z * aff "7.1 z*x + 2.5 z"
-    @test_expression_with_string z / aff "/(z, 7.1 x + 2.5)"
+    @test_expression_with_string z / aff "(z / 7.1 x + 2.5)"
     @test_throws MethodError z ≤ aff
     @test_expression_with_string β * x - aff "0 x - 2.5"
     # 2-4 Variable--QuadExpr
     @test_expression_with_string w + q "2.5 y*z + w + 7.1 x + 2.5"
     @test_expression_with_string w - q "-2.5 y*z + w - 7.1 x - 2.5"
-    @test_expression_with_string w * q "*(w, 2.5 y*z + 7.1 x + 2.5)"
-    @test_expression_with_string w / q "/(w, 2.5 y*z + 7.1 x + 2.5)"
+    @test_expression_with_string w * q "(w * 2.5 y*z + 7.1 x + 2.5)"
+    @test_expression_with_string w / q "(w / 2.5 y*z + 7.1 x + 2.5)"
     @test transpose(x) === x
     @test conj(x) === x
     return
@@ -427,34 +428,50 @@ function test_extension_basic_operators_affexpr(
     @test aff == aff
     @test_throws MethodError aff ≥ 1
     @test_expression_with_string aff - 1 "7.1 x + 1.5"
-    @test_expression_with_string aff^2 "50.41 x² + 35.5 x + 6.25"
-    @test_expression_with_string (7.1 * x + 2.5)^2 "50.41 x² + 35.5 x + 6.25"
-    @test_expression_with_string aff^1 "7.1 x + 2.5"
-    @test_expression_with_string (7.1 * x + 2.5)^1 "7.1 x + 2.5"
-    @test_expression_with_string aff^0 "1"
-    @test_expression_with_string (7.1 * x + 2.5)^0 "1"
-    @test_throws ErrorException aff^3
-    @test_throws ErrorException (7.1 * x + 2.5)^3
-    @test_expression_with_string 
aff^1.5 "^(7.1 x + 2.5, 1.5)" - @test_expression_with_string (7.1 * x + 2.5)^1.5 "^(7.1 x + 2.5, 1.5)" + @test_expression_with_string( + aff^2, + "50.41 x² + 35.5 x + 6.25", + inferrable = false + ) + @test_expression_with_string( + (7.1 * x + 2.5)^2, + "50.41 x² + 35.5 x + 6.25", + inferrable = false + ) + @test_expression_with_string(aff^1, "7.1 x + 2.5", inferrable = false) + @test_expression_with_string( + (7.1 * x + 2.5)^1, + "7.1 x + 2.5", + inferrable = false + ) + @test_expression_with_string(aff^0, "1", inferrable = false) + @test_expression_with_string((7.1 * x + 2.5)^0, "1", inferrable = false) + @test_expression_with_string(aff^3, "(7.1 x + 2.5 ^ 3)", inferrable = false) + @test_expression_with_string( + (7.1 * x + 2.5)^3, + "(7.1 x + 2.5 ^ 3)", + inferrable = false + ) + @test_expression_with_string aff^1.5 "(7.1 x + 2.5 ^ 1.5)" + @test_expression_with_string (7.1 * x + 2.5)^1.5 "(7.1 x + 2.5 ^ 1.5)" # 3-2 AffExpr--Variable @test_expression_with_string aff + z "7.1 x + z + 2.5" @test_expression_with_string aff - z "7.1 x - z + 2.5" @test_expression_with_string aff * z "7.1 x*z + 2.5 z" - @test_expression_with_string aff / z "/(7.1 x + 2.5, z)" + @test_expression_with_string aff / z "(7.1 x + 2.5 / z)" @test_expression_with_string aff - 7.1 * x "0 x + 2.5" # 3-3 AffExpr--AffExpr @test_expression_with_string aff + aff2 "7.1 x + 1.2 y + 3.7" @test_expression_with_string aff - aff2 "7.1 x - 1.2 y + 1.3" @test_expression_with_string aff * aff2 "8.52 x*y + 3 y + 8.52 x + 3" @test string((x + x) * (x + 3)) == string((x + 3) * (x + x)) # Issue #288 - @test_expression_with_string aff / aff2 "/(7.1 x + 2.5, 1.2 y + 1.2)" + @test_expression_with_string aff / aff2 "(7.1 x + 2.5 / 1.2 y + 1.2)" @test_expression_with_string aff - aff "0 x" # 4-4 AffExpr--QuadExpr @test_expression_with_string aff2 + q "2.5 y*z + 1.2 y + 7.1 x + 3.7" @test_expression_with_string aff2 - q "-2.5 y*z + 1.2 y - 7.1 x - 1.3" - @test_expression_with_string aff2 * q "*(1.2 y + 1.2, 2.5 y*z + 7.1 x + 2.5)" - @test_expression_with_string aff2 / q "/(1.2 y + 1.2, 2.5 y*z + 7.1 x + 2.5)" + @test_expression_with_string aff2 * q "(1.2 y + 1.2 * 2.5 y*z + 7.1 x + 2.5)" + @test_expression_with_string aff2 / q "(1.2 y + 1.2 / 2.5 y*z + 7.1 x + 2.5)" @test transpose(aff) === aff @test conj(aff) === aff return @@ -486,18 +503,18 @@ function test_extension_basic_operators_quadexpr( # 4-2 QuadExpr--Variable @test_expression_with_string q + w "2.5 y*z + 7.1 x + w + 2.5" @test_expression_with_string q - w "2.5 y*z + 7.1 x - w + 2.5" - @test_expression_with_string q * w "*(2.5 y*z + 7.1 x + 2.5, w)" - @test_expression_with_string q / w "/(2.5 y*z + 7.1 x + 2.5, w)" + @test_expression_with_string q * w "(2.5 y*z + 7.1 x + 2.5 * w)" + @test_expression_with_string q / w "(2.5 y*z + 7.1 x + 2.5 / w)" # 4-3 QuadExpr--AffExpr @test_expression_with_string q + aff2 "2.5 y*z + 7.1 x + 1.2 y + 3.7" @test_expression_with_string q - aff2 "2.5 y*z + 7.1 x - 1.2 y + 1.3" - @test_expression_with_string q * aff2 "*(2.5 y*z + 7.1 x + 2.5, 1.2 y + 1.2)" - @test_expression_with_string q / aff2 "/(2.5 y*z + 7.1 x + 2.5, 1.2 y + 1.2)" + @test_expression_with_string q * aff2 "(2.5 y*z + 7.1 x + 2.5 * 1.2 y + 1.2)" + @test_expression_with_string q / aff2 "(2.5 y*z + 7.1 x + 2.5 / 1.2 y + 1.2)" # 4-4 QuadExpr--QuadExpr @test_expression_with_string q + q2 "2.5 y*z + 8 x*z + 7.1 x + 1.2 y + 3.7" @test_expression_with_string q - q2 "2.5 y*z - 8 x*z + 7.1 x - 1.2 y + 1.3" - @test_expression_with_string q * q2 "*(2.5 y*z + 7.1 x + 2.5, 8 x*z + 1.2 y + 1.2)" - 
@test_expression_with_string q / q2 "/(2.5 y*z + 7.1 x + 2.5, 8 x*z + 1.2 y + 1.2)" + @test_expression_with_string q * q2 "(2.5 y*z + 7.1 x + 2.5 * 8 x*z + 1.2 y + 1.2)" + @test_expression_with_string q / q2 "(2.5 y*z + 7.1 x + 2.5 / 8 x*z + 1.2 y + 1.2)" @test transpose(q) === q @test conj(q) === q return @@ -604,7 +621,7 @@ function test_complex_pow() @test y^0 == (1.0 + 0im) @test y^1 == 0 * y * y + y @test y^2 == y * y - @test_throws ErrorException y^3 + @test isequal_canonical(y^3, NonlinearExpr(:^, Any[y, 3])) return end diff --git a/test/test_variable.jl b/test/test_variable.jl index 6cd56ea84c1..dd6916104ad 100644 --- a/test/test_variable.jl +++ b/test/test_variable.jl @@ -1118,23 +1118,14 @@ end function test_error_messages() model = Model() @variable(model, x) - err = try - x >= 1 - catch err - err - end - function f(s) - return ErrorException( - replace(replace(err.msg, ">= 1" => "$(s) 1"), "`>=`" => "`$(s)`"), - ) - end - @test_throws err 1 >= x - @test_throws f("<=") x <= 1 - @test_throws f("<=") 1 <= x - @test_throws f(">") x > 1 - @test_throws f(">") 1 > x - @test_throws f("<") x < 1 - @test_throws f("<") 1 < x + @test_throws JuMP._logic_error_exception(:>=) x >= 1 + @test_throws JuMP._logic_error_exception(:>=) 1 >= x + @test_throws JuMP._logic_error_exception(:<=) x <= 1 + @test_throws JuMP._logic_error_exception(:<=) 1 <= x + @test_throws JuMP._logic_error_exception(:>) x > 1 + @test_throws JuMP._logic_error_exception(:>) 1 > x + @test_throws JuMP._logic_error_exception(:<) x < 1 + @test_throws JuMP._logic_error_exception(:<) 1 < x return end diff --git a/test/utilities.jl b/test/utilities.jl index b4a59992515..2db40a48acc 100644 --- a/test/utilities.jl +++ b/test/utilities.jl @@ -17,14 +17,17 @@ macro test_expression(expr) end) end -macro test_expression_with_string(expr, str) - return esc( - quote - realized_expr = @inferred $expr - @test string(realized_expr) == $str - @test isequal_canonical(@expression(model, $expr), realized_expr) - end, - ) +macro test_expression_with_string(expr, str, inferrable = true) + code = quote + realized_expr = if $inferrable + @inferred $expr + else + $expr + end + @test string(realized_expr) == $str + @test isequal_canonical(@expression(model, $expr), realized_expr) + end + return esc(code) end function _strip_line_from_error(err::ErrorException) From 30ff0c7edd56f0380418e1a8063aec237944d0ce Mon Sep 17 00:00:00 2001 From: odow Date: Wed, 28 Jun 2023 15:29:49 +1200 Subject: [PATCH 06/23] Updates for GenericModel Add more tests Update More updates Updates Update Simplify printing of NonlinearExpr Rename to GenericNonlinearExpr Fix test Fix operator test printing Add more tests Update Hessian tutorial Update Update docs/src/manual/nlp.md --- docs/make.jl | 6 +- docs/src/manual/expressions.md | 6 +- docs/src/manual/nlp.md | 856 ++++++++++++++++++ docs/src/manual/{nlp_expr.md => nonlinear.md} | 54 +- .../tutorials/nonlinear/nested_problems.jl | 2 +- .../tutorials/nonlinear/querying_hessians.jl | 157 ++-- src/macros.jl | 2 +- src/nlp_expr.jl | 225 +++-- src/operators.jl | 4 +- src/optimizer_interface.jl | 19 + test/test_expr.jl | 6 + test/test_nlp.jl | 51 +- test/test_nlp_expr.jl | 268 +++--- test/test_operator.jl | 50 +- 14 files changed, 1341 insertions(+), 365 deletions(-) create mode 100644 docs/src/manual/nlp.md rename docs/src/manual/{nlp_expr.md => nonlinear.md} (86%) diff --git a/docs/make.jl b/docs/make.jl index 81d5f75fed4..b6cccf0ec0f 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -342,9 +342,10 @@ const _PAGES = [ 
"manual/objective.md", "manual/containers.md", "manual/solutions.md", - "manual/nlp_expr.md", + "manual/nlp.md", "manual/callbacks.md", "manual/complex.md", + "manual/nonlinear.md", ], jump_api_reference, "Background Information" => @@ -503,9 +504,6 @@ function _validate_pages() continue end filename = replace(joinpath(root, file), doc_src => "") - if filename == "manual/nlp.md" - continue - end if endswith(filename, ".md") && !(filename in set) push!(missing_files, filename) end diff --git a/docs/src/manual/expressions.md b/docs/src/manual/expressions.md index 23637a7d8e1..2aed59d2ce6 100644 --- a/docs/src/manual/expressions.md +++ b/docs/src/manual/expressions.md @@ -388,7 +388,7 @@ julia> aff = x + 1; julia> quad = x^2 + x; julia> expr = cos(x) * sin(quad) + aff -((cos(x) * sin(x² + x)) + x + 1) +(cos(x) * sin(x² + x)) + (x + 1) ``` ### Limitations @@ -414,7 +414,7 @@ julia> model = Model(); julia> @variable(model, x); julia> expr = @expression(model, x < 1) -(x < 1) +x < 1 ``` For technical reasons, other operators that are not overloaded include `||`, @@ -426,7 +426,7 @@ julia> model = Model(); julia> @variable(model, x); julia> expr = @expression(model, ifelse(x < -1 || x >= 1, x^2, 0.0)) -ifelse(((x < -1) || (x >= 1)), x², 0.0) +ifelse((x < -1) || (x >= 1), x², 0.0) ``` ## Initializing arrays diff --git a/docs/src/manual/nlp.md b/docs/src/manual/nlp.md new file mode 100644 index 00000000000..9374510825d --- /dev/null +++ b/docs/src/manual/nlp.md @@ -0,0 +1,856 @@ +```@meta +CurrentModule = JuMP +DocTestSetup = quote + using JuMP +end +DocTestFilters = [r"≤|<=", r"≥|>=", r" == | = ", r" ∈ | in ", r"MathOptInterface|MOI"] +``` + +# Nonlinear Modeling + +!!! info + This page describes the legacy nonlinear interface to JuMP. A new, + experimental nonlinear interface is in development. Find out more by reading + [Nonlinear Modeling](@ref new_nonlinear_interface). + +JuMP has support for general smooth nonlinear (convex and nonconvex) +optimization problems. JuMP is able to provide exact, sparse second-order +derivatives to solvers. This information can improve solver accuracy and +performance. + +There are three main changes to solve nonlinear programs in JuMP. + * Use [`@NLobjective`](@ref) instead of [`@objective`](@ref) + * Use [`@NLconstraint`](@ref) instead of [`@constraint`](@ref) + * Use [`@NLexpression`](@ref) instead of [`@expression`](@ref) + +!!! info + There are some restrictions on what syntax you can use in the `@NLxxx` + macros. Make sure to read the [Syntax notes](@ref). + +## Set a nonlinear objective + +Use [`@NLobjective`](@ref) to set a nonlinear objective. + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x[1:2]); + +julia> @NLobjective(model, Min, exp(x[1]) - sqrt(x[2])) +``` +To modify a nonlinear objective, call [`@NLobjective`](@ref) again. + +## Add a nonlinear constraint + +Use [`@NLconstraint`](@ref) to add a nonlinear constraint. + +```jldoctest nonlinear_constraint +julia> model = Model(); + +julia> @variable(model, x[1:2]); + +julia> @NLconstraint(model, exp(x[1]) <= 1) +exp(x[1]) - 1.0 ≤ 0 + +julia> @NLconstraint(model, [i = 1:2], x[i]^i >= i) +2-element Vector{NonlinearConstraintRef{ScalarShape}}: + x[1] ^ 1.0 - 1.0 ≥ 0 + x[2] ^ 2.0 - 2.0 ≥ 0 + +julia> @NLconstraint(model, con[i = 1:2], prod(x[j] for j = 1:i) == i) +2-element Vector{NonlinearConstraintRef{ScalarShape}}: + (*)(x[1]) - 1.0 = 0 + x[1] * x[2] - 2.0 = 0 +``` + +!!! info + You can only create nonlinear constraints with `<=`, `>=`, and `==`. 
+    More general `Nonlinear`-in-`Set` constraints are not supported.
+
+Delete a nonlinear constraint using [`delete`](@ref):
+```jldoctest nonlinear_constraint
+julia> delete(model, con[1])
+```
+
+## Create a nonlinear expression
+
+Use [`@NLexpression`](@ref) to create nonlinear expression objects. The syntax
+is identical to [`@expression`](@ref), except that the expression can contain
+nonlinear terms.
+
+```jldoctest nl_expression
+julia> model = Model();
+
+julia> @variable(model, x[1:2]);
+
+julia> expr = @NLexpression(model, exp(x[1]) + sqrt(x[2]))
+subexpression[1]: exp(x[1]) + sqrt(x[2])
+
+julia> my_anon_expr = @NLexpression(model, [i = 1:2], sin(x[i]))
+2-element Vector{NonlinearExpression}:
+ subexpression[2]: sin(x[1])
+ subexpression[3]: sin(x[2])
+
+julia> @NLexpression(model, my_expr[i = 1:2], sin(x[i]))
+2-element Vector{NonlinearExpression}:
+ subexpression[4]: sin(x[1])
+ subexpression[5]: sin(x[2])
+```
+
+Nonlinear expressions can be used in [`@NLobjective`](@ref) and
+[`@NLconstraint`](@ref), and can even be nested in other
+[`@NLexpression`](@ref)s.
+
+```jldoctest nl_expression
+julia> @NLobjective(model, Min, expr^2 + 1)
+
+julia> @NLconstraint(model, [i = 1:2], my_expr[i] <= i)
+2-element Vector{NonlinearConstraintRef{ScalarShape}}:
+ subexpression[4] - 1.0 ≤ 0
+ subexpression[5] - 2.0 ≤ 0
+
+julia> @NLexpression(model, nested[i = 1:2], sin(my_expr[i]))
+2-element Vector{NonlinearExpression}:
+ subexpression[6]: sin(subexpression[4])
+ subexpression[7]: sin(subexpression[5])
+```
+
+Use [`value`](@ref) to query the value of a nonlinear expression:
+```jldoctest nl_expression
+julia> set_start_value(x[1], 1.0)
+
+julia> value(start_value, nested[1])
+0.7456241416655579
+
+julia> sin(sin(1.0))
+0.7456241416655579
+```
+
+## Create a nonlinear parameter
+
+For nonlinear models only, JuMP offers a syntax for explicit "parameter" objects,
+which are constants in the model that can be efficiently updated between solves.
+
+Nonlinear parameters are declared by using the [`@NLparameter`](@ref) macro
+and may be indexed by arbitrary sets analogously to JuMP variables and
+expressions.
+
+The initial value of the parameter must be provided on the right-hand side of
+the `==` sign.
+
+```jldoctest nonlinear_parameters
+julia> model = Model();
+
+julia> @variable(model, x);
+
+julia> @NLparameter(model, p[i = 1:2] == i)
+2-element Vector{NonlinearParameter}:
+ parameter[1] == 1.0
+ parameter[2] == 2.0
+```
+
+Create anonymous parameters using the `value` keyword:
+```jldoctest nonlinear_parameters
+julia> anon_parameter = @NLparameter(model, value = 1)
+parameter[3] == 1.0
+```
+
+!!! info
+    A parameter is not an optimization variable. It must be fixed to a value with
+    `==`. If you want a parameter that is `<=` or `>=`, create a variable instead
+    using [`@variable`](@ref).
+
+Use [`value`](@ref) and [`set_value`](@ref) to query or update the value of a
+parameter.
+
+```jldoctest nonlinear_parameters
+julia> value.(p)
+2-element Vector{Float64}:
+ 1.0
+ 2.0
+
+julia> set_value(p[2], 3.0)
+3.0
+
+julia> value.(p)
+2-element Vector{Float64}:
+ 1.0
+ 3.0
+```
+
+Nonlinear parameters must be used *within nonlinear macros* only.
+
+### When to use a parameter
+
+Nonlinear parameters are useful when solving nonlinear models in a sequence:
+
+```@example
+using JuMP, Ipopt
+model = Model(Ipopt.Optimizer)
+set_silent(model)
+@variable(model, z)
+@NLparameter(model, x == 1.0)
+@NLobjective(model, Min, (z - x)^2)
+optimize!(model)
+@show value(z) # Equals 1.0. 
+ +# Now, update the value of x to solve a different problem. +set_value(x, 5.0) +optimize!(model) +@show value(z) # Equals 5.0 +nothing #hide +``` + +!!! info + Using nonlinear parameters can be faster than creating a new model from + scratch with updated data because JuMP is able to avoid repeating a number + of steps in processing the model before handing it off to the solver. + +## Syntax notes + +The syntax accepted in nonlinear macros is more restricted than the syntax +for linear and quadratic macros. We note some important points below. + +### Scalar operations only + +Except for the splatting syntax discussed below, all expressions +must be simple scalar operations. You cannot use `dot`, matrix-vector products, +vector slices, etc. +```jldoctest nlp_scalar_only +julia> model = Model(); + +julia> @variable(model, x[1:2]); + +julia> @variable(model, y); + +julia> c = [1, 2]; + +julia> @NLobjective(model, Min, c' * x + 3y) +ERROR: Unexpected array [1 2] in nonlinear expression. Nonlinear expressions may contain only scalar expressions. +[...] +``` + +Translate vector operations into explicit `sum()` operations: +```jldoctest nlp_scalar_only +julia> @NLobjective(model, Min, sum(c[i] * x[i] for i = 1:2) + 3y) +``` + +Or use an [`@expression`](@ref): +```jldoctest nlp_scalar_only +julia> @expression(model, expr, c' * x) +x[1] + 2 x[2] + +julia> @NLobjective(model, Min, expr + 3y) + +``` + +### Splatting + +The [splatting operator](https://docs.julialang.org/en/v1/manual/faq/#...-splits-one-argument-into-many-different-arguments-in-function-calls-1) + `...` is recognized in a very restricted setting for expanding function + arguments. The expression splatted can be *only* a symbol. More complex + expressions are not recognized. + +```jldoctest; filter=r"≤|<=" +julia> model = Model(); + +julia> @variable(model, x[1:3]); + +julia> @NLconstraint(model, *(x...) <= 1.0) +x[1] * x[2] * x[3] - 1.0 ≤ 0 + +julia> @NLconstraint(model, *((x / 2)...) <= 0.0) +ERROR: Unsupported use of the splatting operator. JuMP supports splatting only symbols. For example, `x...` is ok, but `(x + 1)...`, `[x; y]...` and `g(f(y)...)` are not. +``` + +## [User-defined Functions](@id old_user_defined_functions) + +JuMP natively supports the set of univariate and multivariate functions recognized by the +`MOI.Nonlinear` submodule. In addition to this list of functions, it is possible +to register custom *user-defined* nonlinear functions. User-defined functions +can be used anywhere in [`@NLobjective`](@ref), [`@NLconstraint`](@ref), and +[`@NLexpression`](@ref). + +JuMP will attempt to automatically register functions it detects in your +nonlinear expressions, which usually means manually registering a function is +not needed. Two exceptions are if you want to provide custom derivatives, or if +the function is not available in the scope of the nonlinear expression. + +!!! warning + User-defined functions must return a scalar output. For a work-around, see + [User-defined functions with vector outputs](@ref). + +### Automatic differentiation + +JuMP does not support black-box optimization, so all user-defined functions must +provide derivatives in some form. Fortunately, JuMP supports **automatic +differentiation of user-defined functions**, a feature to our knowledge not +available in any comparable modeling systems. + +!!! info + Automatic differentiation is *not* finite differencing. JuMP's automatically + computed derivatives are not subject to approximation error. 
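+
+To see the claim concretely, here is a small standalone check, included as an
+editorial sketch rather than as part of JuMP's API. It assumes the
+ForwardDiff.jl package (which, as described next, is what JuMP uses
+internally) is installed:
+
+```julia
+using ForwardDiff
+f(x) = sin(x)^2
+exact = 2 * sin(1.0) * cos(1.0)         # analytic derivative of sin(x)^2 at 1.0
+ForwardDiff.derivative(f, 1.0) - exact  # zero, up to floating-point roundoff
+h = 1e-6
+(f(1.0 + h) - f(1.0)) / h - exact       # nonzero truncation error of order h
+```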
+ +JuMP uses [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl) to +perform automatic differentiation; see the ForwardDiff.jl +[documentation](https://www.juliadiff.org/ForwardDiff.jl/v0.10.2/user/limitations.html) +for a description of how to write a function suitable for automatic +differentiation. + +#### Common mistakes when writing a user-defined function + +!!! warning + Get an error like `No method matching Float64(::ForwardDiff.Dual)`? Read + this section, and see the guidelines at [ForwardDiff.jl](https://www.juliadiff.org/ForwardDiff.jl/release-0.10/user/limitations.html). + +The most common error is that your user-defined function is not generic with +respect to the number type, that is, don't assume that the input to the function +is `Float64`. +```julia +f(x::Float64) = 2 * x # This will not work. +f(x::Real) = 2 * x # This is good. +f(x) = 2 * x # This is also good. +``` + +Another reason you may encounter this error is if you create arrays inside +your function which are `Float64`. +```julia +function bad_f(x...) + y = zeros(length(x)) # This constructs an array of `Float64`! + for i = 1:length(x) + y[i] = x[i]^i + end + return sum(y) +end + +function good_f(x::T...) where {T<:Real} + y = zeros(T, length(x)) # Construct an array of type `T` instead! + for i = 1:length(x) + y[i] = x[i]^i + end + return sum(y) +end +``` + +### Register a function + +To register a user-defined function with derivatives computed by +automatic differentiation, use the [`register`](@ref) method as in the following +example: + +```@example +using JuMP #hide +square(x) = x^2 +f(x, y) = (x - 1)^2 + (y - 2)^2 + +model = Model() + +register(model, :square, 1, square; autodiff = true) +register(model, :my_f, 2, f; autodiff = true) + +@variable(model, x[1:2] >= 0.5) +@NLobjective(model, Min, my_f(x[1], square(x[2]))) +``` + +The above code creates a JuMP model with the objective function +`(x[1] - 1)^2 + (x[2]^2 - 2)^2`. The arguments to [`register`](@ref) are: + 1. The model for which the functions are registered. + 2. A Julia symbol object which serves as the name of the user-defined function + in JuMP expressions. + 3. The number of input arguments that the function takes. + 4. The Julia method which computes the function + 5. A flag to instruct JuMP to compute exact gradients automatically. + +!!! tip + The symbol `:my_f` doesn't have to match the name of the function `f`. + However, it's more readable if it does. Make sure you use `my_f` + and not `f` in the macros. + +!!! warning + User-defined functions cannot be re-registered and will not update if you + modify the underlying Julia function. If you want to change a user-defined + function between solves, rebuild the model or use a different name. To use + a different name programmatically, see [Raw expression input](@ref). + +### Register a function and gradient + +Forward-mode automatic differentiation as implemented by ForwardDiff.jl has a +computational cost that scales linearly with the number of input dimensions. As +such, it is not the most efficient way to compute gradients of user-defined +functions if the number of input arguments is large. In this case, users may +want to provide their own routines for evaluating gradients. 
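+
+For intuition about that scaling, consider the following sketch. It is
+illustrative only, not a benchmark, and assumes ForwardDiff.jl is installed.
+Each extra input adds another partial derivative for forward mode to
+propagate, so the work per gradient call grows roughly linearly in the number
+of inputs:
+
+```julia
+using ForwardDiff
+f(x) = sum(xi^2 for xi in x)
+# One call evaluates all partial derivatives, but the work per call grows
+# roughly linearly with length(x):
+ForwardDiff.gradient(f, rand(10))
+ForwardDiff.gradient(f, rand(1_000))
+```
+
+In that regime, a hand-written gradient, registered as shown in the sections
+below, can be substantially cheaper.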
+ +#### Univariate functions + +For univariate functions, the gradient function `∇f` returns a number that +represents the first-order derivative: +```@example +using JuMP #hide +f(x) = x^2 +∇f(x) = 2x +model = Model() +register(model, :my_square, 1, f, ∇f; autodiff = true) +@variable(model, x >= 0) +@NLobjective(model, Min, my_square(x)) +``` +If `autodiff = true`, JuMP will use automatic differentiation to compute the +hessian. + +#### Multivariate functions + +For multivariate functions, the gradient function `∇f` must take a gradient +vector as the first argument that is filled in-place: +```@example +using JuMP #hide +f(x, y) = (x - 1)^2 + (y - 2)^2 +function ∇f(g::AbstractVector{T}, x::T, y::T) where {T} + g[1] = 2 * (x - 1) + g[2] = 2 * (y - 2) + return +end + +model = Model() +register(model, :my_square, 2, f, ∇f) +@variable(model, x[1:2] >= 0) +@NLobjective(model, Min, my_square(x[1], x[2])) +``` + +!!! warning + Make sure the first argument to `∇f` supports an `AbstractVector`, and do + not assume the input is `Float64`. + +### Register a function, gradient, and hessian + +You can also register a function with the second-order derivative information, +which is a scalar for univariate functions, and a symmetric matrix for +multivariate functions. + +#### Univariate functions + +Pass a function which returns a number representing the second-order derivative: +```@example +using JuMP #hide +f(x) = x^2 +∇f(x) = 2x +∇²f(x) = 2 +model = Model() +register(model, :my_square, 1, f, ∇f, ∇²f) +@variable(model, x >= 0) +@NLobjective(model, Min, my_square(x)) +``` + +#### Multivariate functions + +For multivariate functions, the hessian function `∇²f` must take an +`AbstractMatrix` as the first argument, the lower-triangular of which is filled +in-place: +```@example +using JuMP #hide +f(x...) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2 +function ∇f(g, x...) + g[1] = 400 * x[1]^3 - 400 * x[1] * x[2] + 2 * x[1] - 2 + g[2] = 200 * (x[2] - x[1]^2) + return +end +function ∇²f(H, x...) + H[1, 1] = 1200 * x[1]^2 - 400 * x[2] + 2 + # H[1, 2] = -400 * x[1] <-- Not needed. Fill the lower-triangular only. + H[2, 1] = -400 * x[1] + H[2, 2] = 200.0 + return +end + +model = Model() +register(model, :rosenbrock, 2, f, ∇f, ∇²f) +@variable(model, x[1:2]) +@NLobjective(model, Min, rosenbrock(x[1], x[2])) +``` + +!!! warning + You may assume the Hessian matrix `H` is initialized with zeros, and because + `H` is symmetric, you need only to fill in the non-zero of the + lower-triangular terms. The matrix type passed in as `H` depends on the + automatic differentiation system, so make sure the first argument to the + Hessian function supports an `AbstractMatrix` (it may be something other + than `Matrix{Float64}`). However, you may assume only that `H` supports + `size(H)` and `setindex!`. Finally, the matrix is treated as dense, so the + performance will be poor on functions with high-dimensional input. + +### User-defined functions with vector inputs + +User-defined functions which take vectors as input arguments (for example, +`f(x::Vector)`) are *not* supported. Instead, use Julia's splatting syntax to +create a function with scalar arguments. For example, instead of +```julia +f(x::Vector) = sum(x[i]^i for i in 1:length(x)) +``` +define: +```julia +f(x...) = sum(x[i]^i for i in 1:length(x)) +``` + +This function `f` can be used in a JuMP model as follows: +```@example +using JuMP #hide +model = Model() +@variable(model, x[1:5] >= 0) +f(x...) 
= sum(x[i]^i for i in 1:length(x)) +register(model, :f, 5, f; autodiff = true) +@NLobjective(model, Min, f(x...)) +``` + +!!! tip + Make sure to read the syntax restrictions of [Splatting](@ref). + +## Factors affecting solution time + +The execution time when solving a nonlinear programming problem can be divided +into two parts, the time spent in the optimization algorithm (the solver) and +the time spent evaluating the nonlinear functions and corresponding derivatives. +Ipopt explicitly displays these two timings in its output, for example: + +``` +Total CPU secs in IPOPT (w/o function evaluations) = 7.412 +Total CPU secs in NLP function evaluations = 2.083 +``` + +For Ipopt in particular, one can improve the performance by installing advanced +sparse linear algebra packages, see [Installation Guide](@ref). For other +solvers, see their respective documentation for performance tips. + +The function evaluation time, on the other hand, is the responsibility of the +modeling language. JuMP computes derivatives by using reverse-mode automatic +differentiation with graph coloring methods for exploiting sparsity of the +Hessian matrix. As a conservative bound, JuMP's performance here currently +may be expected to be within a factor of 5 of AMPL's. Our [paper in +SIAM Review](https://mlubin.github.io/pdf/jump-sirev.pdf) has more details. + +## Querying derivatives from a JuMP model + +For some advanced use cases, one may want to directly query the derivatives of a +JuMP model instead of handing the problem off to a solver. +Internally, JuMP implements the [`MOI.AbstractNLPEvaluator`](@ref) interface. To +obtain an NLP evaluator object from a JuMP model, use [`NLPEvaluator`](@ref). +[`index`](@ref) returns the [`MOI.VariableIndex`](@ref) corresponding to a JuMP +variable. `MOI.VariableIndex` itself is a type-safe wrapper for `Int64` (stored +in the `.value` field.) + +For example: + +```jldoctest derivatives +julia> raw_index(v::MOI.VariableIndex) = v.value +raw_index (generic function with 1 method) + +julia> model = Model(); + +julia> @variable(model, x) +x + +julia> @variable(model, y) +y + +julia> @NLobjective(model, Min, sin(x) + sin(y)) + +julia> values = zeros(2) +2-element Vector{Float64}: + 0.0 + 0.0 + +julia> x_index = raw_index(JuMP.index(x)) +1 + +julia> y_index = raw_index(JuMP.index(y)) +2 + +julia> values[x_index] = 2.0 +2.0 + +julia> values[y_index] = 3.0 +3.0 + +julia> d = NLPEvaluator(model) +Nonlinear.Evaluator with available features: + * :Grad + * :Jac + * :JacVec + * :Hess + * :HessVec + * :ExprGraph + +julia> MOI.initialize(d, [:Grad]) + +julia> MOI.eval_objective(d, values) +1.0504174348855488 + +julia> sin(2.0) + sin(3.0) +1.0504174348855488 + +julia> ∇f = zeros(2) +2-element Vector{Float64}: + 0.0 + 0.0 + +julia> MOI.eval_objective_gradient(d, ∇f, values) + +julia> ∇f[x_index], ∇f[y_index] +(-0.4161468365471424, -0.9899924966004454) + +julia> cos(2.0), cos(3.0) +(-0.4161468365471424, -0.9899924966004454) +``` + +Only nonlinear constraints (those added with [`@NLconstraint`](@ref)), and +nonlinear objectives (added with [`@NLobjective`](@ref)) exist in the scope of +the [`NLPEvaluator`](@ref). + +The [`NLPEvaluator`](@ref) *does not evaluate derivatives of linear or quadratic +constraints or objectives*. + +The [`index`](@ref) method applied to a nonlinear constraint reference object +returns its index as a [`MOI.Nonlinear.ConstraintIndex`](@ref). 
For example: + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> @NLconstraint(model, cons1, sin(x) <= 1); + +julia> @NLconstraint(model, cons2, x + 5 == 10); + +julia> typeof(cons1) +NonlinearConstraintRef{ScalarShape} (alias for ConstraintRef{GenericModel{Float64}, MathOptInterface.Nonlinear.ConstraintIndex, ScalarShape}) + +julia> index(cons1) +MathOptInterface.Nonlinear.ConstraintIndex(1) + +julia> index(cons2) +MathOptInterface.Nonlinear.ConstraintIndex(2) +``` + +```@meta +# TODO: Provide a link for how to access the linear and quadratic parts of the +# model. +``` + +Note that for one-sided nonlinear constraints, JuMP subtracts any values on the +right-hand side when computing expressions. In other words, one-sided nonlinear +constraints are always transformed to have a right-hand side of zero. + +This method of querying derivatives directly from a JuMP model is convenient for +interacting with the model in a structured way, for example, for accessing derivatives +of specific variables. For example, in statistical maximum likelihood estimation +problems, one is often interested in the Hessian matrix at the optimal solution, +which can be queried using the [`NLPEvaluator`](@ref). + +## Raw expression input + +!!! warning + This section requires advanced knowledge of Julia's `Expr`. You should read + the [Expressions and evaluation](https://docs.julialang.org/en/v1/manual/metaprogramming/#Expressions-and-evaluation) + section of the Julia documentation first. + +In addition to the [`@NLexpression`](@ref), [`@NLobjective`](@ref) and +[`@NLconstraint`](@ref) macros, it is also possible to provide Julia `Expr` +objects directly by using [`add_nonlinear_expression`](@ref), +[`set_nonlinear_objective`](@ref) and [`add_nonlinear_constraint`](@ref). + +This input form may be useful if the expressions are generated programmatically, +or if you experience compilation issues with the macro input (see +[Known performance issues](@ref) for more information). + +### Add a nonlinear expression + +Use [`add_nonlinear_expression`](@ref) to add a nonlinear expression to the model. + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x) +x + +julia> expr = :($(x) + sin($(x)^2)) +:(x + sin(x ^ 2)) + +julia> expr_ref = add_nonlinear_expression(model, expr) +subexpression[1]: x + sin(x ^ 2.0) +``` +This is equivalent to +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> expr_ref = @NLexpression(model, x + sin(x^2)) +subexpression[1]: x + sin(x ^ 2.0) +``` + +!!! note + You must interpolate the variables directly into the expression `expr`. + +### Set the objective function + +Use [`set_nonlinear_objective`](@ref) to set a nonlinear objective. + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> expr = :($(x) + $(x)^2) +:(x + x ^ 2) + +julia> set_nonlinear_objective(model, MIN_SENSE, expr) +``` +This is equivalent to +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> @NLobjective(model, Min, x + x^2) +``` + +!!! note + You must use `MIN_SENSE` or `MAX_SENSE` instead of `Min` and `Max`. + +### Add a constraint + +Use [`add_nonlinear_constraint`](@ref) to add a nonlinear constraint. 
+
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x);
+
+julia> expr = :($(x) + $(x)^2)
+:(x + x ^ 2)
+
+julia> add_nonlinear_constraint(model, :($(expr) <= 1))
+(x + x ^ 2.0) - 1.0 ≤ 0
+```
+
+This is equivalent to
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x);
+
+julia> @NLconstraint(model, x + x^2 <= 1)
+(x + x ^ 2.0) - 1.0 ≤ 0
+```
+
+### More complicated examples
+
+Raw expression input is most useful when the expressions are generated
+programmatically, often in conjunction with user-defined functions.
+
+As an example, we construct a model with the nonlinear constraints `f(x) <= 1`,
+where `f(x) = x^2` and `f(x) = sin(x)^2`:
+```jldoctest
+julia> function main(functions::Vector{Function})
+           model = Model()
+           @variable(model, x)
+           for (i, f) in enumerate(functions)
+               f_sym = Symbol("f_$(i)")
+               register(model, f_sym, 1, f; autodiff = true)
+               add_nonlinear_constraint(model, :($(f_sym)($(x)) <= 1))
+           end
+           print(model)
+           return
+       end
+main (generic function with 1 method)
+
+julia> main([x -> x^2, x -> sin(x)^2])
+Feasibility
+Subject to
+ f_1(x) - 1.0 ≤ 0
+ f_2(x) - 1.0 ≤ 0
+```
+
+As another example, we construct a model with the constraint
+`x^2 + sin(x)^2 <= 1`:
+```jldoctest
+julia> function main(functions::Vector{Function})
+           model = Model()
+           @variable(model, x)
+           expr = Expr(:call, :+)
+           for (i, f) in enumerate(functions)
+               f_sym = Symbol("f_$(i)")
+               register(model, f_sym, 1, f; autodiff = true)
+               push!(expr.args, :($(f_sym)($(x))))
+           end
+           add_nonlinear_constraint(model, :($(expr) <= 1))
+           print(model)
+           return
+       end
+main (generic function with 1 method)
+
+julia> main([x -> x^2, x -> sin(x)^2])
+Feasibility
+Subject to
+ (f_1(x) + f_2(x)) - 1.0 ≤ 0
+```
+
+### Registered functions with a variable number of arguments
+
+User-defined functions require a fixed number of input arguments. However,
+sometimes you will want to use a registered function like:
+```jldoctest nlp_register_variable_arguments
+julia> f(x...) = sum(exp(x[i]^2) for i in 1:length(x));
+```
+with different numbers of arguments.
+
+The solution is to register the same function `f` for each unique number of
+input arguments, making sure to use a unique name each time. For example:
+
+```jldoctest nlp_register_variable_arguments
+julia> A = [[1], [1, 2], [2, 3, 4], [1, 3, 4, 5]];
+
+julia> model = Model();
+
+julia> @variable(model, x[1:5]);
+
+julia> funcs = Set{Symbol}();
+
+julia> for a in A
+           key = Symbol("f$(length(a))")
+           if !(key in funcs)
+               push!(funcs, key)
+               register(model, key, length(a), f; autodiff = true)
+           end
+           add_nonlinear_constraint(model, :($key($(x[a]...)) <= 1))
+       end
+
+julia> print(model)
+Feasibility
+Subject to
+ f1(x[1]) - 1.0 ≤ 0
+ f2(x[1], x[2]) - 1.0 ≤ 0
+ f3(x[2], x[3], x[4]) - 1.0 ≤ 0
+ f4(x[1], x[3], x[4], x[5]) - 1.0 ≤ 0
+```
+
+## Known performance issues
+
+The macro-based input to JuMP's nonlinear interface can cause a performance
+issue if you:
+
+ 1. write a macro with a large number (hundreds) of terms
+ 2. call that macro from within a function instead of from the top-level in
+    global scope.
+
+The first issue does not depend on the number of resulting terms in the
+mathematical expression, but rather the number of terms in the Julia `Expr`
+representation of that expression. For example, the expression
+`sum(x[i] for i in 1:1_000_000)` contains one million mathematical terms, but
+the `Expr` representation is just a single sum.
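+
+To make the distinction concrete, the following check (which is independent of
+any JuMP model) shows that the `Expr` for such a summation contains only a
+handful of nodes:
+
+```jldoctest
+julia> ex = :(sum(x[i] for i in 1:1_000_000));
+
+julia> length(ex.args)  # `:sum` plus a single generator node
+2
+```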
+
+The most common cause, other than a lot of tedious typing, is if you write a
+program that automatically writes a JuMP model as a text file, which you later
+execute. One example is [MINLPLib.jl](https://github.com/lanl-ansi/MINLPLib.jl),
+which automatically transpiles models in the GAMS scalar format into JuMP
+examples.
+
+As a rule of thumb, if you are writing programs to automatically generate
+expressions for the JuMP macros, you should use the [Raw expression input](@ref)
+instead. For more information, read [MathOptInterface Issue#1997](https://github.com/jump-dev/MathOptInterface.jl/issues/1997).
diff --git a/docs/src/manual/nlp_expr.md b/docs/src/manual/nonlinear.md
similarity index 86%
rename from docs/src/manual/nlp_expr.md
rename to docs/src/manual/nonlinear.md
index d2fa7c754ad..2a2d7ec563d 100644
--- a/docs/src/manual/nlp_expr.md
+++ b/docs/src/manual/nonlinear.md
@@ -6,7 +6,14 @@ end
 DocTestFilters = [r"≤|<=", r"≥|>=", r" == | = ", r" ∈ | in ", r"MathOptInterface|MOI"]
 ```
 
-# Nonlinear Modeling
+# [Nonlinear Modeling](@id new_nonlinear_interface)
+
+!!! warning
+    This page describes an experimental nonlinear interface to JuMP. The API
+    described below is stable, and it will not break with future 1.X releases
+    of JuMP. However, solver support may be limited, and there may be gaps in
+    functionality compared with [Nonlinear Modeling](@ref). To report a bug or
+    request a missing feature, please [open an issue](https://github.com/jump-dev/JuMP.jl/issues/new/choose).
 
 JuMP has support for general smooth nonlinear (convex and nonconvex)
 optimization problems. JuMP is able to provide exact, sparse second-order
@@ -23,7 +30,7 @@ julia> model = Model();
 
 julia> @variable(model, x[1:2]);
 
 julia> @objective(model, Min, exp(x[1]) - sqrt(x[2]))
-(exp(x[1]) - sqrt(x[2]))
+exp(x[1]) - sqrt(x[2])
 ```
 
 To modify a nonlinear objective, call [`@objective`](@ref) again.
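+For example, the following sketch (reusing the `model` and `x` from above)
+replaces the objective we just set; the printed form assumes the output style
+shown elsewhere on this page:
+
+```julia
+julia> @objective(model, Max, sqrt(x[1]) - exp(x[2]))
+sqrt(x[1]) - exp(x[2])
+```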
@@ -38,12 +45,12 @@ julia> model = Model(); julia> @variable(model, x[1:2]); julia> @constraint(model, exp(x[1]) <= 1) -(exp(x[1]) - 1.0) ≤ 0 +exp(x[1]) - 1.0 ≤ 0 julia> @constraint(model, con[i = 1:2], 2^x[i] >= i) 2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarNonlinearFunction, MathOptInterface.GreaterThan{Float64}}, ScalarShape}}: - con[1] : ((2.0 ^ x[1]) - 1.0) ≥ 0 - con[2] : ((2.0 ^ x[2]) - 2.0) ≥ 0 + con[1] : (2.0 ^ x[1]) - 1.0 ≥ 0 + con[2] : (2.0 ^ x[2]) - 2.0 ≥ 0 ``` Delete a nonlinear constraint using [`delete`](@ref): @@ -63,15 +70,15 @@ julia> model = Model(); julia> @variable(model, x[1:2]); julia> expr = @expression(model, exp(x[1]) + sqrt(x[2])) -(exp(x[1]) + sqrt(x[2])) +exp(x[1]) + sqrt(x[2]) julia> my_anon_expr = @expression(model, [i = 1:2], sin(x[i])) -2-element Vector{NonlinearExpr{VariableRef}}: +2-element Vector{NonlinearExpr}: sin(x[1]) sin(x[2]) julia> @expression(model, my_expr[i = 1:2], sin(x[i])) -2-element Vector{NonlinearExpr{VariableRef}}: +2-element Vector{NonlinearExpr}: sin(x[1]) sin(x[2]) ``` @@ -81,15 +88,15 @@ A [`NonlinearExpr`](@ref) can be used in [`@objective`](@ref), ```jldoctest nl_expression julia> @objective(model, Min, expr^2 + 1) -(((exp(x[1]) + sqrt(x[2])) ^ 2.0) + 1.0) +((exp(x[1]) + sqrt(x[2])) ^ 2.0) + 1.0 julia> @constraint(model, [i = 1:2], my_expr[i] <= i) 2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarNonlinearFunction, MathOptInterface.LessThan{Float64}}, ScalarShape}}: - (sin(x[1]) - 1.0) ≤ 0 - (sin(x[2]) - 2.0) ≤ 0 + sin(x[1]) - 1.0 ≤ 0 + sin(x[2]) - 2.0 ≤ 0 julia> @expression(model, nested[i = 1:2], sin(my_expr[i])) -2-element Vector{NonlinearExpr{VariableRef}}: +2-element Vector{NonlinearExpr}: sin(sin(x[1])) sin(sin(x[2])) ``` @@ -326,26 +333,3 @@ function good_f(x::T...) where {T<:Real} return sum(y) end ``` - -## Factors affecting solution time - -The execution time when solving a nonlinear programming problem can be divided -into two parts, the time spent in the optimization algorithm (the solver) and -the time spent evaluating the nonlinear functions and corresponding derivatives. -Ipopt explicitly displays these two timings in its output, for example: - -``` -Total CPU secs in IPOPT (w/o function evaluations) = 7.412 -Total CPU secs in NLP function evaluations = 2.083 -``` - -For Ipopt in particular, one can improve the performance by installing advanced -sparse linear algebra packages, see [Installation Guide](@ref). For other -solvers, see their respective documentation for performance tips. - -The function evaluation time, on the other hand, is the responsibility of the -modeling language. JuMP computes derivatives by using reverse-mode automatic -differentiation with graph coloring methods for exploiting sparsity of the -Hessian matrix. As a conservative bound, JuMP's performance here currently -may be expected to be within a factor of 5 of AMPL's. Our [paper in -SIAM Review](https://mlubin.github.io/pdf/jump-sirev.pdf) has more details. diff --git a/docs/src/tutorials/nonlinear/nested_problems.jl b/docs/src/tutorials/nonlinear/nested_problems.jl index c24b0d546b0..dc6e420a16b 100644 --- a/docs/src/tutorials/nonlinear/nested_problems.jl +++ b/docs/src/tutorials/nonlinear/nested_problems.jl @@ -77,7 +77,7 @@ function solve_lower_level(x...) 
@objective( model, Max, - x[1]^2 * y[1] + x[2]^2 * y[2] - x[1] * y[1]^4.0 - 2 * x[2] * y[2]^4.0, + x[1]^2 * y[1] + x[2]^2 * y[2] - x[1] * y[1]^4 - 2 * x[2] * y[2]^4, ) @constraint(model, (y[1] - 10)^2 + (y[2] - 10)^2 <= 25) optimize!(model) diff --git a/docs/src/tutorials/nonlinear/querying_hessians.jl b/docs/src/tutorials/nonlinear/querying_hessians.jl index e70f0975d9e..d4a44ee72cc 100644 --- a/docs/src/tutorials/nonlinear/querying_hessians.jl +++ b/docs/src/tutorials/nonlinear/querying_hessians.jl @@ -102,29 +102,54 @@ analytic_hessian([1, 1], 0, [0, 1]) analytic_hessian([1, 1], 1, [0, 0]) -#- +# ## Create a nonlinear model + +# JuMP delegates automatic differentiation to the `MOI.Nonlinear` submodule. +# Therefore, to compute the Hessian of the Lagrangian, we need to create a +# [`MOI.Nonlinear.Model`](@ref) object: -# ## Initializing the NLPEvaluator +rows = Any[] +nlp = MOI.Nonlinear.Model() +for (F, S) in list_of_constraint_types(model) + if F <: VariableRef + continue # Skip variable bounds + end + for ci in all_constraints(model, F, S) + push!(rows, ci) + object = constraint_object(ci) + MOI.Nonlinear.add_constraint(nlp, object.func, object.set) + end +end +MOI.Nonlinear.set_objective(nlp, objective_function(model)) +nlp -# JuMP stores all information relating to the nonlinear portions of the model in -# a [`NLPEvaluator`](@ref) struct: +# It is important that we save the constraint indices in a vector `rows`, so +# that we know the order of the constraints in the nonlinear model. -d = NLPEvaluator(model) +# Next, we need to convert our model into an [`MOI.Nonlinear.Evaluator`](@ref), +# specifying an automatic differentiation backend. In this case, we use +# [`MOI.Nonlinear.SparseReverseMode`](@ref): -# Before computing anything with the NLPEvaluator, we need to initialize it. +evaluator = MOI.Nonlinear.Evaluator( + nlp, + MOI.Nonlinear.SparseReverseMode(), + index.(all_variables(model)), +) + +# Before computing anything with the evaluator, we need to initialize it. # Use [`MOI.features_available`](@ref) to see what we can query: -MOI.features_available(d) +MOI.features_available(evaluator) -# Consult the MOI documentation for specifics. But to obtain the Hessian matrix, +# Consult the MOI documentation for specifics, but to obtain the Hessian matrix, # we need to initialize `:Hess`: -MOI.initialize(d, [:Hess]) +MOI.initialize(evaluator, [:Hess]) # MOI represents the Hessian as a sparse matrix. Get the sparsity pattern as # follows: -hessian_sparsity = MOI.hessian_lagrangian_structure(d) +hessian_sparsity = MOI.hessian_lagrangian_structure(evaluator) # The sparsity pattern has a few properties of interest: # * Each element `(i, j)` indicates a structural non-zero in row `i` and column @@ -146,8 +171,7 @@ H = SparseArrays.sparse(I, J, V, n, n) # Of course, knowing where the zeros are isn't very interesting. We really want # to compute the value of the Hessian matrix at a point. 
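+# The signature is `MOI.eval_hessian_lagrangian(evaluator, V, x, σ, μ)`: it
+# fills `V` in-place with the values of the structural non-zeros at the point
+# `x`, where `σ` scales the objective and `μ` holds one multiplier per
+# constraint, in the order we recorded in `rows`.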
-num_g = num_nonlinear_constraints(model) -MOI.eval_hessian_lagrangian(d, V, ones(n), 1.0, ones(num_g)) +MOI.eval_hessian_lagrangian(evaluator, V, ones(n), 1.0, ones(length(rows))) H = SparseArrays.sparse(I, J, V, n, n) # In practice, we often want to compute the value of the hessian at the optimal @@ -164,13 +188,14 @@ x = all_variables(model) x_optimal = value.(x) # Next, we need the optimal dual solution associated with the nonlinear -# constraints: +# constraints (this is where it is important to record the order of the +# constraints as we added them to `nlp`): -y_optimal = dual.(all_nonlinear_constraints(model)) +y_optimal = dual.(rows) # Now we can compute the Hessian at the optimal primal-dual point: -MOI.eval_hessian_lagrangian(d, V, x_optimal, 1.0, y_optimal) +MOI.eval_hessian_lagrangian(evaluator, V, x_optimal, 1.0, y_optimal) H = SparseArrays.sparse(I, J, V, n, n) # However, this Hessian isn't quite right because it isn't symmetric. We can fix @@ -192,95 +217,29 @@ end fill_off_diagonal(H) -# Moreover, this Hessian only accounts for the objective and constraints entered -# using [`@NLobjective`](@ref) and [`@NLconstraint`](@ref). If we want to take -# quadratic objectives and constraints written using [`@objective`](@ref) or -# [`@constraint`](@ref) into account, we'll need to handle them separately. - -# !!! tip -# If you don't want to do this, you can replace calls to [`@objective`](@ref) -# and [`@constraint`](@ref) with [`@NLobjective`](@ref) and -# [`@NLconstraint`](@ref). - -# ## Hessians from QuadExpr functions - -# To compute the hessian from a quadratic expression, let's see how JuMP -# represents a quadratic constraint: - -f = constraint_object(g_1).func - -# `f` is a quadratic expression of the form: -# ``` -# f(x) = Σqᵢⱼ * xᵢ * xⱼ + Σaᵢ xᵢ + c -# ``` -# So `∇²f(x)` is the matrix formed by `[qᵢⱼ]ᵢⱼ` if `i != j` and `2[qᵢⱼ]ᵢⱼ` if `i = j`. - -variables_to_column = Dict(x[i] => i for i in 1:n) - -function add_to_hessian(H, f::QuadExpr, μ) - for (vars, coef) in f.terms - i = variables_to_column[vars.a] - j = variables_to_column[vars.b] - H[i, j] += μ * coef - end - return -end - -# If the function `f` is not a `QuadExpr`, do nothing because it is an `AffExpr` -# or a `VariableRef`. In both cases, the second derivative is zero. 
- -add_to_hessian(H, f::Any, μ) = nothing - -# Then we iterate over all constraints in the model and add their Hessian -# components: - -for (F, S) in list_of_constraint_types(model) - for cref in all_constraints(model, F, S) - f = constraint_object(cref).func - add_to_hessian(H, f, dual(cref)) - end -end - -H - -# Finally, we need to take into account the objective function: - -add_to_hessian(H, objective_function(model), 1.0) - -fill_off_diagonal(H) - # Putting everything together: -function compute_optimal_hessian(model) - d = NLPEvaluator(model) - MOI.initialize(d, [:Hess]) - hessian_sparsity = MOI.hessian_lagrangian_structure(d) - I = [i for (i, _) in hessian_sparsity] - J = [j for (_, j) in hessian_sparsity] - V = zeros(length(hessian_sparsity)) - x = all_variables(model) - x_optimal = value.(x) - y_optimal = dual.(all_nonlinear_constraints(model)) - MOI.eval_hessian_lagrangian(d, V, x_optimal, 1.0, y_optimal) - n = num_variables(model) - H = SparseArrays.sparse(I, J, V, n, n) - vmap = Dict(x[i] => i for i in 1:n) - add_to_hessian(H, f::Any, μ) = nothing - function add_to_hessian(H, f::QuadExpr, μ) - for (vars, coef) in f.terms - if vars.a != vars.b - H[vmap[vars.a], vmap[vars.b]] += μ * coef - else - H[vmap[vars.a], vmap[vars.b]] += 2 * μ * coef - end - end - end +function compute_optimal_hessian(model::Model) + rows = Any[] + nlp = MOI.Nonlinear.Model() for (F, S) in list_of_constraint_types(model) - for cref in all_constraints(model, F, S) - add_to_hessian(H, constraint_object(cref).func, dual(cref)) + for ci in all_constraints(model, F, S) + push!(rows, ci) + object = constraint_object(ci) + MOI.Nonlinear.add_constraint(nlp, object.func, object.set) end end - add_to_hessian(H, objective_function(model), 1.0) + MOI.Nonlinear.set_objective(nlp, objective_function(model)) + x = all_variables(model) + backend = MOI.Nonlinear.SparseReverseMode() + evaluator = MOI.Nonlinear.Evaluator(nlp, backend, index.(x)) + MOI.initialize(evaluator, [:Hess]) + hessian_sparsity = MOI.hessian_lagrangian_structure(evaluator) + I = [i for (i, _) in hessian_sparsity] + J = [j for (_, j) in hessian_sparsity] + V = zeros(length(hessian_sparsity)) + MOI.eval_hessian_lagrangian(evaluator, V, value.(x), 1.0, dual.(rows)) + H = SparseArrays.sparse(I, J, V, length(x), length(x)) return Matrix(fill_off_diagonal(H)) end diff --git a/src/macros.jl b/src/macros.jl index fc120efe225..f331f88b5a3 100644 --- a/src/macros.jl +++ b/src/macros.jl @@ -1743,7 +1743,7 @@ macro expression(args...) $build_code # Don't leak a `_MA.Zero` if the expression is an empty summation, or # other structure that returns `_MA.Zero()`. - _replace_zero($m, $code) + _replace_zero($m, $expr_var) end code = Containers.container_code(idxvars, indices, code, requested_container) diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl index cf80125963a..0dfd6c54b91 100644 --- a/src/nlp_expr.jl +++ b/src/nlp_expr.jl @@ -4,8 +4,8 @@ # file, You can obtain one at https://mozilla.org/MPL/2.0/. """ - NonlinearExpr{V}(head::Symbol, args::Vector{Any}) - NonlinearExpr{V}(head::Symbol, args::Any...) + GenericNonlinearExpr{V}(head::Symbol, args::Vector{Any}) + GenericNonlinearExpr{V}(head::Symbol, args::Any...) The scalar-valued nonlinear function `head(args...)`, represented as a symbolic expression tree, with the call operator `head` and ordered arguments in `args`. @@ -37,14 +37,14 @@ The vector `args` contains the arguments to the nonlinear function. If the operator is univariate, it must contain one element. Otherwise, it may contain multiple elements. 
-Given a subtype of [`AbstractVariableRef`](@ref), `V`, for `NonlinearExpr{V}`, +Given a subtype of [`AbstractVariableRef`](@ref), `V`, for `GenericNonlinearExpr{V}`, each element must be one of the following: * A constant value of type `<:Number` * A `V` * A [`GenericAffExpr{C,V}`](@ref) * A [`GenericQuadExpr{C,V}`](@ref) - * A [`NonlinearExpr{V}`](@ref) + * A [`GenericNonlinearExpr{V}`](@ref) ## Unsupported operators @@ -66,17 +66,21 @@ julia> @variable(model, x) x julia> f = sin(x)^2 -^(sin(x), 2.0) - -julia> f = NonlinearExpr(:^, NonlinearExpr(:sin, x), 2.0) -^(sin(x), 2.0) +sin(x) ^ 2.0 + +julia> f = GenericNonlinearExpr{VariableRef}( + :^, + GenericNonlinearExpr{VariableRef}(:sin, x), + 2.0, + ) +sin(x) ^ 2.0 ``` """ -struct NonlinearExpr{V<:AbstractVariableRef} <: AbstractJuMPScalar +struct GenericNonlinearExpr{V<:AbstractVariableRef} <: AbstractJuMPScalar head::Symbol args::Vector{Any} - function NonlinearExpr(head::Symbol, args::Vector{Any}) + function GenericNonlinearExpr(head::Symbol, args::Vector{Any}) index = findfirst(Base.Fix2(isa, AbstractJuMPScalar), args) if index === nothing error( @@ -87,7 +91,7 @@ struct NonlinearExpr{V<:AbstractVariableRef} <: AbstractJuMPScalar return new{variable_ref_type(args[index])}(head, args) end - function NonlinearExpr{V}( + function GenericNonlinearExpr{V}( head::Symbol, args::Vector{Any}, ) where {V<:AbstractVariableRef} @@ -95,31 +99,58 @@ struct NonlinearExpr{V<:AbstractVariableRef} <: AbstractJuMPScalar end end -variable_ref_type(::NonlinearExpr{V}) where {V} = V +""" + NonlinearExpr + +Alias for `GenericNonlinearExpr{VariableRef}`, the specific +[`GenericNonlinearExpr`](@ref) used by JuMP. +""" +const NonlinearExpr = GenericNonlinearExpr{VariableRef} + +variable_ref_type(::GenericNonlinearExpr{V}) where {V} = V # We include this method so that we can refactor the internal representation of -# NonlinearExpr without having to rewrite the method overloads. -function NonlinearExpr{V}(head::Symbol, args...) where {V<:AbstractVariableRef} - return NonlinearExpr{V}(head, Any[args...]) +# GenericNonlinearExpr without having to rewrite the method overloads. 
+function GenericNonlinearExpr{V}( + head::Symbol, + args..., +) where {V<:AbstractVariableRef} + return GenericNonlinearExpr{V}(head, Any[args...]) end -Base.length(x::NonlinearExpr) = length(x.args) -Base.getindex(x::NonlinearExpr, i::Int) = x.args[i] +Base.length(x::GenericNonlinearExpr) = length(x.args) +Base.getindex(x::GenericNonlinearExpr, i::Int) = x.args[i] const _PREFIX_OPERATORS = (:+, :-, :*, :/, :^, :||, :&&, :>, :<, :(<=), :(>=), :(==)) -function function_string(::MIME"text/plain", x::NonlinearExpr) +_needs_parentheses(::Union{Number,AbstractVariableRef}) = false +_needs_parentheses(::Any) = true +function _needs_parentheses(x::GenericNonlinearExpr) + return x.head in _PREFIX_OPERATORS && length(x) > 1 +end + +function function_string(::MIME"text/plain", x::GenericNonlinearExpr) io, stack = IOBuffer(), Any[x] while !isempty(stack) arg = pop!(stack) - if arg isa NonlinearExpr + if arg isa GenericNonlinearExpr if arg.head in _PREFIX_OPERATORS && length(arg) > 1 - print(io, "(") - push!(stack, ")") + if _needs_parentheses(arg[1]) + print(io, "(") + end + if _needs_parentheses(arg.args[end]) + push!(stack, ")") + end for i in length(arg):-1:2 push!(stack, arg[i]) + if _needs_parentheses(arg.args[i]) + push!(stack, "(") + end push!(stack, " $(arg.head) ") + if _needs_parentheses(arg.args[i-1]) + push!(stack, ")") + end end push!(stack, arg[1]) else @@ -139,11 +170,11 @@ function function_string(::MIME"text/plain", x::NonlinearExpr) return read(io, String) end -function function_string(::MIME"text/latex", x::NonlinearExpr) +function function_string(::MIME"text/latex", x::GenericNonlinearExpr) io, stack = IOBuffer(), Any[x] while !isempty(stack) arg = pop!(stack) - if arg isa NonlinearExpr + if arg isa GenericNonlinearExpr if arg.head in _PREFIX_OPERATORS && length(arg) > 1 print(io, "\\left({") push!(stack, "}\\right)") @@ -172,7 +203,7 @@ end _isequal(x, y) = x == y _isequal(x::T, y::T) where {T<:AbstractJuMPScalar} = isequal_canonical(x, y) -function isequal_canonical(x::NonlinearExpr, y::NonlinearExpr) +function isequal_canonical(x::GenericNonlinearExpr, y::GenericNonlinearExpr) return x.head == y.head && length(x) == length(y) && all(i -> _isequal(x[i], y[i]), 1:length(x)) @@ -181,17 +212,17 @@ end function MOI.Nonlinear.parse_expression( data::MOI.Nonlinear.Model, expr::MOI.Nonlinear.Expression, - x::NonlinearExpr, + x::GenericNonlinearExpr, parent::Int, ) stack = Tuple{Int,Any}[(parent, x)] while !isempty(stack) parent_node, arg = pop!(stack) - if arg isa NonlinearExpr + if arg isa GenericNonlinearExpr _parse_without_recursion_inner(stack, data, expr, arg, parent_node) else - # We can use recursion here, because NonlinearExpr only occur in - # other NonlinearExpr. + # We can use recursion here, because GenericNonlinearExpr only occur in + # other GenericNonlinearExpr. MOI.Nonlinear.parse_expression(data, expr, arg, parent_node) end end @@ -230,9 +261,13 @@ end # Method definitions -Base.zero(::Type{NonlinearExpr{V}}) where {V} = NonlinearExpr{V}(:+, 0.0) +function Base.zero(::Type{GenericNonlinearExpr{V}}) where {V} + return GenericNonlinearExpr{V}(:+, 0.0) +end -Base.one(::Type{NonlinearExpr{V}}) where {V} = NonlinearExpr{V}(:+, 1.0) +function Base.one(::Type{GenericNonlinearExpr{V}}) where {V} + return GenericNonlinearExpr{V}(:+, 1.0) +end # Univariate operators @@ -241,17 +276,19 @@ for f in MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS if f == :+ continue # We don't need this. 
elseif f == :- - @eval Base.:-(x::NonlinearExpr{V}) where {V} = NonlinearExpr{V}(:-, x) + @eval function Base.:-(x::GenericNonlinearExpr{V}) where {V} + return GenericNonlinearExpr{V}(:-, x) + end elseif isdefined(Base, f) @eval function Base.$(f)(x::AbstractJuMPScalar) - return NonlinearExpr{variable_ref_type(x)}($op, x) + return GenericNonlinearExpr{variable_ref_type(x)}($op, x) end elseif isdefined(MOI.Nonlinear, :SpecialFunctions) # The operator is defined in some other package. SF = MOI.Nonlinear.SpecialFunctions if isdefined(SF, f) @eval function $(SF).$(f)(x::AbstractJuMPScalar) - return NonlinearExpr{variable_ref_type(x)}($op, x) + return GenericNonlinearExpr{variable_ref_type(x)}($op, x) end end end @@ -270,21 +307,21 @@ for f in (:+, :-, :*, :^, :/, :atan) @eval begin function Base.$(f)(x::AbstractJuMPScalar, y::_Constant) rhs = convert(Float64, _constant_to_number(y)) - return NonlinearExpr{variable_ref_type(x)}($op, x, rhs) + return GenericNonlinearExpr{variable_ref_type(x)}($op, x, rhs) end function Base.$(f)(x::_Constant, y::AbstractJuMPScalar) lhs = convert(Float64, _constant_to_number(x)) - return NonlinearExpr{variable_ref_type(y)}($op, lhs, y) + return GenericNonlinearExpr{variable_ref_type(y)}($op, lhs, y) end function Base.$(f)(x::AbstractJuMPScalar, y::AbstractJuMPScalar) - return NonlinearExpr{variable_ref_type(x)}($op, x, y) + return GenericNonlinearExpr{variable_ref_type(x)}($op, x, y) end end end function _MA.operate!!( ::typeof(_MA.add_mul), - x::NonlinearExpr, + x::GenericNonlinearExpr, y::AbstractJuMPScalar, ) if x.head == :+ @@ -295,7 +332,7 @@ function _MA.operate!!( end """ - flatten(expr::NonlinearExpr) + flatten(expr::GenericNonlinearExpr) Flatten a nonlinear expression by lifting nested `+` and `*` nodes into a single n-ary operation. @@ -315,21 +352,21 @@ julia> @variable(model, x) x julia> y = prod(x for i in 1:4) -((x² * x) * x) +((x²) * x) * x julia> flatten(y) -(x² * x * x) +(x²) * x * x julia> flatten(sin(y)) -sin((x² * x * x)) +sin((x²) * x * x) ``` """ -function flatten(expr::NonlinearExpr{V}) where {V} - root = NonlinearExpr{V}(expr.head, Any[]) +function flatten(expr::GenericNonlinearExpr{V}) where {V} + root = GenericNonlinearExpr{V}(expr.head, Any[]) nodes_to_visit = Any[(root, arg) for arg in reverse(expr.args)] while !isempty(nodes_to_visit) parent, arg = pop!(nodes_to_visit) - if !(arg isa NonlinearExpr) + if !(arg isa GenericNonlinearExpr) # Not a nonlinear expression, so can use recursion. 
push!(parent.args, flatten(arg)) elseif parent.head in (:+, :*) && arg.head == parent.head @@ -354,7 +391,7 @@ end flatten(expr) = expr function _ifelse(a::AbstractJuMPScalar, x, y) - return NonlinearExpr{variable_ref_type(a)}(:ifelse, Any[a, x, y]) + return GenericNonlinearExpr{variable_ref_type(a)}(:ifelse, Any[a, x, y]) end for (f, op) in ( @@ -369,20 +406,20 @@ for (f, op) in ( op = Meta.quot(op) @eval begin function $(f)(x::AbstractJuMPScalar, y) - return NonlinearExpr{variable_ref_type(x)}($op, x, y) + return GenericNonlinearExpr{variable_ref_type(x)}($op, x, y) end function $(f)(x, y::AbstractJuMPScalar) - return NonlinearExpr{variable_ref_type(y)}($op, x, y) + return GenericNonlinearExpr{variable_ref_type(y)}($op, x, y) end function $(f)(x::AbstractJuMPScalar, y::AbstractJuMPScalar) - return NonlinearExpr{variable_ref_type(x)}($op, x, y) + return GenericNonlinearExpr{variable_ref_type(x)}($op, x, y) end end end # JuMP interop -function owner_model(expr::NonlinearExpr) +function owner_model(expr::GenericNonlinearExpr) for arg in expr.args if !(arg isa AbstractJuMPScalar) continue @@ -395,7 +432,10 @@ function owner_model(expr::NonlinearExpr) return nothing end -function check_belongs_to_model(expr::NonlinearExpr, model::AbstractModel) +function check_belongs_to_model( + expr::GenericNonlinearExpr, + model::AbstractModel, +) for arg in expr.args if arg isa AbstractJuMPScalar check_belongs_to_model(arg, model) @@ -404,7 +444,7 @@ function check_belongs_to_model(expr::NonlinearExpr, model::AbstractModel) return end -function moi_function(f::NonlinearExpr) +function moi_function(f::GenericNonlinearExpr) ret = MOI.ScalarNonlinearFunction(f.head, Any[]) stack = Tuple{MOI.ScalarNonlinearFunction,Any}[] for arg in reverse(f.args) @@ -412,7 +452,7 @@ function moi_function(f::NonlinearExpr) end while !isempty(stack) parent, arg = pop!(stack) - if arg isa NonlinearExpr + if arg isa GenericNonlinearExpr new_ret = MOI.ScalarNonlinearFunction(arg.head, Any[]) push!(parent.args, new_ret) for child in reverse(arg.args) @@ -427,16 +467,17 @@ function moi_function(f::NonlinearExpr) return ret end -function jump_function(model::Model, f::MOI.ScalarNonlinearFunction) - ret = NonlinearExpr{VariableRef}(f.head, Any[]) - stack = Tuple{NonlinearExpr,Any}[] +function jump_function(model::GenericModel, f::MOI.ScalarNonlinearFunction) + V = variable_ref_type(typeof(model)) + ret = GenericNonlinearExpr{V}(f.head, Any[]) + stack = Tuple{GenericNonlinearExpr,Any}[] for arg in reverse(f.args) push!(stack, (ret, arg)) end while !isempty(stack) parent, arg = pop!(stack) if arg isa MOI.ScalarNonlinearFunction - new_ret = NonlinearExpr{VariableRef}(arg.head, Any[]) + new_ret = GenericNonlinearExpr{V}(arg.head, Any[]) push!(parent.args, new_ret) for child in reverse(arg.args) push!(stack, (new_ret, child)) @@ -450,11 +491,14 @@ function jump_function(model::Model, f::MOI.ScalarNonlinearFunction) return ret end -function jump_function_type(::Model, ::Type{<:MOI.ScalarNonlinearFunction}) - return NonlinearExpr{VariableRef} +function jump_function_type( + model::GenericModel, + ::Type{<:MOI.ScalarNonlinearFunction}, +) + return GenericNonlinearExpr{variable_ref_type(typeof(model))} end -moi_function_type(::Type{<:NonlinearExpr}) = MOI.ScalarNonlinearFunction +moi_function_type(::Type{<:GenericNonlinearExpr}) = MOI.ScalarNonlinearFunction function constraint_object(c::NonlinearConstraintRef) nlp = nonlinear_model(c.model) @@ -462,7 +506,8 @@ function constraint_object(c::NonlinearConstraintRef) return 
ScalarConstraint(jump_function(c.model, data.expression), data.set) end -function jump_function(model::Model, expr::MOI.Nonlinear.Expression) +function jump_function(model::GenericModel, expr::MOI.Nonlinear.Expression) + V = variable_ref_type(typeof(model)) nlp = nonlinear_model(model) parsed = Vector{Any}(undef, length(expr.nodes)) adj = MOI.Nonlinear.adjacency_matrix(expr.nodes) @@ -470,17 +515,17 @@ function jump_function(model::Model, expr::MOI.Nonlinear.Expression) for i in length(expr.nodes):-1:1 node = expr.nodes[i] parsed[i] = if node.type == MOI.Nonlinear.NODE_CALL_UNIVARIATE - NonlinearExpr{VariableRef}( + GenericNonlinearExpr{V}( nlp.operators.univariate_operators[node.index], parsed[rowvals[SparseArrays.nzrange(adj, i)[1]]], ) elseif node.type == MOI.Nonlinear.NODE_CALL_MULTIVARIATE - NonlinearExpr{VariableRef}( + GenericNonlinearExpr{V}( nlp.operators.multivariate_operators[node.index], Any[parsed[rowvals[j]] for j in SparseArrays.nzrange(adj, i)], ) elseif node.type == MOI.Nonlinear.NODE_MOI_VARIABLE - VariableRef(model, MOI.VariableIndex(node.index)) + V(model, MOI.VariableIndex(node.index)) elseif node.type == MOI.Nonlinear.NODE_PARAMETER NonlinearParameter(model, node.index) elseif node.type == MOI.Nonlinear.NODE_SUBEXPRESSION @@ -496,11 +541,11 @@ function jump_function(model::Model, expr::MOI.Nonlinear.Expression) return parsed[1] end -function value(f::Function, expr::NonlinearExpr) +function value(f::Function, expr::GenericNonlinearExpr) return _evaluate_expr(MOI.Nonlinear.OperatorRegistry(), f, expr) end -function value(a::NonlinearExpr; result::Int = 1) +function value(a::GenericNonlinearExpr; result::Int = 1) return value(a) do x return value(x; result = result) end @@ -522,7 +567,11 @@ function _evaluate_expr( return convert(Float64, expr) end -function _evaluate_user_defined_function(registry, f, expr::NonlinearExpr) +function _evaluate_user_defined_function( + registry, + f, + expr::GenericNonlinearExpr, +) model = owner_model(expr) op, nargs = expr.head, length(expr.args) udf = MOI.get(model, MOI.UserDefinedFunction(op, nargs)) @@ -539,7 +588,7 @@ end function _evaluate_expr( registry::MOI.Nonlinear.OperatorRegistry, f::Function, - expr::NonlinearExpr, + expr::GenericNonlinearExpr, ) op = expr.head # TODO(odow): uses private function @@ -570,10 +619,10 @@ end # These converts are used in the {add,sub}mul definition for AbstractJuMPScalar. 
-Base.convert(::Type{<:NonlinearExpr}, x::AbstractVariableRef) = x +Base.convert(::Type{<:GenericNonlinearExpr}, x::AbstractVariableRef) = x function Base.convert( - ::Type{<:NonlinearExpr}, + ::Type{<:GenericNonlinearExpr}, x::GenericAffExpr{C,V}, ) where {C,V} args = Any[] @@ -581,7 +630,7 @@ function Base.convert( if isone(coef) push!(args, variable) elseif !iszero(coef) - push!(args, NonlinearExpr{V}(:*, coef, variable)) + push!(args, GenericNonlinearExpr{V}(:*, coef, variable)) end end if !iszero(x.constant) || isempty(args) @@ -590,11 +639,11 @@ function Base.convert( if length(args) == 1 return args[1] end - return NonlinearExpr{V}(:+, args) + return GenericNonlinearExpr{V}(:+, args) end function Base.convert( - ::Type{<:NonlinearExpr}, + ::Type{<:GenericNonlinearExpr}, x::GenericQuadExpr{C,V}, ) where {C,V} args = Any[] @@ -602,14 +651,14 @@ function Base.convert( if isone(coef) push!(args, variable) elseif !iszero(coef) - push!(args, NonlinearExpr{V}(:*, coef, variable)) + push!(args, GenericNonlinearExpr{V}(:*, coef, variable)) end end for (pair, coef) in x.terms if isone(coef) - push!(args, NonlinearExpr{V}(:*, pair.a, pair.b)) + push!(args, GenericNonlinearExpr{V}(:*, pair.a, pair.b)) elseif !iszero(coef) - push!(args, NonlinearExpr{V}(:*, coef, pair.a, pair.b)) + push!(args, GenericNonlinearExpr{V}(:*, coef, pair.a, pair.b)) end end if !iszero(x.aff.constant) || isempty(args) @@ -618,37 +667,37 @@ function Base.convert( if length(args) == 1 return args[1] end - return NonlinearExpr{V}(:+, args) + return GenericNonlinearExpr{V}(:+, args) end function _MA.promote_operation( ::Union{typeof(+),typeof(-),typeof(*)}, - ::Type{NonlinearExpr{V}}, + ::Type{GenericNonlinearExpr{V}}, ::Type{<:AbstractJuMPScalar}, ) where {V<:AbstractVariableRef} - return NonlinearExpr{V} + return GenericNonlinearExpr{V} end function _MA.promote_operation( ::Union{typeof(+),typeof(-),typeof(*)}, ::Type{<:AbstractJuMPScalar}, - ::Type{NonlinearExpr{V}}, + ::Type{GenericNonlinearExpr{V}}, ) where {V<:AbstractVariableRef} - return NonlinearExpr{V} + return GenericNonlinearExpr{V} end function _MA.promote_operation( ::Union{typeof(+),typeof(-),typeof(*)}, - ::Type{NonlinearExpr{V}}, - ::Type{NonlinearExpr{V}}, + ::Type{GenericNonlinearExpr{V}}, + ::Type{GenericNonlinearExpr{V}}, ) where {V<:AbstractVariableRef} - return NonlinearExpr{V} + return GenericNonlinearExpr{V} end function _MA.promote_operation( ::Union{typeof(+),typeof(-),typeof(*)}, - ::Type{NonlinearExpr{U}}, - ::Type{NonlinearExpr{V}}, + ::Type{GenericNonlinearExpr{U}}, + ::Type{GenericNonlinearExpr{V}}, ) where {U<:AbstractVariableRef,V<:AbstractVariableRef} return error( "Unable to promote two different types of nonlinear expression", @@ -693,7 +742,9 @@ struct UserDefinedFunction head::Symbol end -(f::UserDefinedFunction)(args...) = NonlinearExpr(f.head, Any[a for a in args]) +function (f::UserDefinedFunction)(args...) 
+ return GenericNonlinearExpr(f.head, Any[a for a in args]) +end """ add_user_defined_function( @@ -737,7 +788,7 @@ foo(x) ``` """ function add_user_defined_function( - model::Model, + model::GenericModel, op::Symbol, dim::Int, args::Vararg{Function,N}, diff --git a/src/operators.jl b/src/operators.jl index 2922714d0d7..311afb73714 100644 --- a/src/operators.jl +++ b/src/operators.jl @@ -204,7 +204,7 @@ function Base.:^(lhs::AbstractVariableRef, rhs::Integer) elseif rhs == 0 return one(GenericQuadExpr{T,variable_ref_type(lhs)}) else - return NonlinearExpr(:^, Any[lhs, rhs]) + return GenericNonlinearExpr(:^, Any[lhs, rhs]) end end @@ -216,7 +216,7 @@ function Base.:^(lhs::GenericAffExpr{T}, rhs::Integer) where {T} elseif rhs == 0 return one(GenericQuadExpr{T,variable_ref_type(lhs)}) else - return NonlinearExpr(:^, Any[lhs, rhs]) + return GenericNonlinearExpr(:^, Any[lhs, rhs]) end end diff --git a/src/optimizer_interface.jl b/src/optimizer_interface.jl index 2a4db592806..80f54bf0e17 100644 --- a/src/optimizer_interface.jl +++ b/src/optimizer_interface.jl @@ -415,6 +415,13 @@ function optimize!( # The nlp_model is not kept in sync, so re-set it here. # TODO: Consider how to handle incremental solves. if nonlinear_model(model) !== nothing + if _uses_new_nonlinear_interface(model) + error( + "Cannot optimize a model which contains the features from " * + "both the legacy and new nonlinear interfaces. You must use " * + "one or the other.", + ) + end evaluator = MOI.Nonlinear.Evaluator( nonlinear_model(model), _differentiation_backend, @@ -454,6 +461,18 @@ function optimize!( return end +function _uses_new_nonlinear_interface(model) + if objective_function_type(model) <: GenericNonlinearExpr + return true + end + for (F, S) in list_of_constraint_types(model) + if F <: GenericNonlinearExpr + return true + end + end + return false +end + """ compute_conflict!(model::GenericModel) diff --git a/test/test_expr.jl b/test/test_expr.jl index f17b977b75b..1aad5b4fb0c 100644 --- a/test/test_expr.jl +++ b/test/test_expr.jl @@ -445,4 +445,10 @@ function test_expression_ambiguities() return end +function test_quadexpr_owner_model() + quad = GenericQuadExpr{Int,Int}() + @test owner_model(quad) === nothing + return +end + end # TestExpr diff --git a/test/test_nlp.jl b/test/test_nlp.jl index c1279f18185..d4f34cfe56a 100644 --- a/test/test_nlp.jl +++ b/test/test_nlp.jl @@ -1605,7 +1605,7 @@ function test_parse_expression_nonlinearexpr_call() model = Model() @variable(model, x) @variable(model, y) - f = NonlinearExpr(:ifelse, Any[x, 0, y]) + f = GenericNonlinearExpr(:ifelse, Any[x, 0, y]) @NLexpression(model, ref, f) nlp = nonlinear_model(model) expr = :(ifelse($x, 0, $y)) @@ -1617,7 +1617,7 @@ function test_parse_expression_nonlinearexpr_or() model = Model() @variable(model, x) @variable(model, y) - f = NonlinearExpr(:||, Any[x, y]) + f = GenericNonlinearExpr(:||, Any[x, y]) @NLexpression(model, ref, f) nlp = nonlinear_model(model) expr = :($x || $y) @@ -1629,7 +1629,7 @@ function test_parse_expression_nonlinearexpr_and() model = Model() @variable(model, x) @variable(model, y) - f = NonlinearExpr(:&&, Any[x, y]) + f = GenericNonlinearExpr(:&&, Any[x, y]) @NLexpression(model, ref, f) nlp = nonlinear_model(model) expr = :($x && $y) @@ -1641,7 +1641,7 @@ function test_parse_expression_nonlinearexpr_unsupported() model = Model() @variable(model, x) @variable(model, y) - f = NonlinearExpr(:foo, Any[x, y]) + f = GenericNonlinearExpr(:foo, Any[x, y]) @test_throws( MOI.UnsupportedNonlinearOperator, @NLexpression(model, 
ref, f), @@ -1649,4 +1649,47 @@ function test_parse_expression_nonlinearexpr_unsupported() return end +function test_parse_expression_nonlinearexpr_nested_comparison() + model = Model() + @variable(model, x) + @variable(model, y) + f = GenericNonlinearExpr(:||, Any[x, y]) + g = GenericNonlinearExpr(:&&, Any[f, x]) + @NLexpression(model, ref, g) + nlp = nonlinear_model(model) + expr = :(($x || $y) && $x) + @test MOI.Nonlinear.parse_expression(nlp, expr) == nlp[index(ref)] + return +end + +function test_parse_boolean_comparison_fallbacks() + model = Model() + @variable(model, x) + @test @expression(model, ifelse(true && true, x, 0.0)) === x + @test @expression(model, ifelse(true || false, x, 0.0)) === x + @test @expression(model, ifelse(1 < 2, x, 0.0)) === x + @test @expression(model, ifelse(1 <= 2, x, 0.0)) === x + @test @expression(model, ifelse(2 > 1, x, 0.0)) === x + @test @expression(model, ifelse(2 >= 1, x, 0.0)) === x + @test @expression(model, ifelse(2 == 2, x, 0.0)) === x + @test @expression(model, ifelse(true && false, x, 0.0)) === 0.0 + @test @expression(model, ifelse(false || false, x, 0.0)) === 0.0 + @test @expression(model, ifelse(2 < 1, x, 0.0)) === 0.0 + @test @expression(model, ifelse(2 <= 1, x, 0.0)) === 0.0 + @test @expression(model, ifelse(1 > 2, x, 0.0)) === 0.0 + @test @expression(model, ifelse(1 >= 2, x, 0.0)) === 0.0 + @test @expression(model, ifelse(1 == 2, x, 0.0)) === 0.0 + return +end + +function test_get_node_type_comparison() + model = Model() + @variable(model, x) + expr = @expression(model, ifelse(x >= 0, x, 0.0)) + @NLexpression(model, ref, ifelse(x >= 0, x, 0.0)) + nlp = nonlinear_model(model) + @test MOI.Nonlinear.parse_expression(nlp, expr) == nlp[index(ref)] + return +end + end diff --git a/test/test_nlp_expr.jl b/test/test_nlp_expr.jl index 7e62f951a68..924a0bc1eb1 100644 --- a/test/test_nlp_expr.jl +++ b/test/test_nlp_expr.jl @@ -17,13 +17,13 @@ function test_extension_univariate_operators( for f in MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS if f in (:+, :-, :abs2) op = getfield(Base, f) - @test op(sin(x)) isa NonlinearExpr{VariableRefType} + @test op(sin(x)) isa GenericNonlinearExpr{VariableRefType} elseif isdefined(Base, f) op = getfield(Base, f) - @test op(x) isa NonlinearExpr{VariableRefType} + @test op(x) isa GenericNonlinearExpr{VariableRefType} elseif isdefined(MOI.Nonlinear.SpecialFunctions, f) op = getfield(MOI.Nonlinear.SpecialFunctions, f) - @test op(x) isa NonlinearExpr{VariableRefType} + @test op(x) isa GenericNonlinearExpr{VariableRefType} end end return @@ -37,15 +37,15 @@ function test_extension_binary_operators( @variable(model, x) num, aff, quad, nlp = 1.0, 1.0 + x, x^2, sin(x) for op in (+, -, *, /), a in (num, x, aff, quad, nlp) - @test op(a, nlp) isa NonlinearExpr{VariableRefType} - @test op(nlp, a) isa NonlinearExpr{VariableRefType} + @test op(a, nlp) isa GenericNonlinearExpr{VariableRefType} + @test op(nlp, a) isa GenericNonlinearExpr{VariableRefType} end for op in (*, /), a in (x, aff) - @test op(a, quad) isa NonlinearExpr{VariableRefType} - @test op(quad, a) isa NonlinearExpr{VariableRefType} + @test op(a, quad) isa GenericNonlinearExpr{VariableRefType} + @test op(quad, a) isa GenericNonlinearExpr{VariableRefType} end for a in (num, x, aff, quad), b in (x, aff, quad) - @test /(a, b) isa NonlinearExpr{VariableRefType} + @test /(a, b) isa GenericNonlinearExpr{VariableRefType} end return end @@ -57,7 +57,7 @@ function test_extension_objective( model = ModelType() @variable(model, x) @objective(model, Min, 2.0 * sin(x)^2 + cos(x) / 
x) - @test objective_function(model) isa NonlinearExpr{VariableRefType} + @test objective_function(model) isa GenericNonlinearExpr{VariableRefType} return end @@ -68,19 +68,21 @@ function test_extension_expression( model = ModelType() @variable(model, x) @variable(model, y[1:3]) - @test string(@expression(model, *(y...))) == "(y[1]*y[2] * y[3])" + @test string(@expression(model, *(y...))) == "(y[1]*y[2]) * y[3]" @test string(@expression(model, sin(x))) == "sin(x)" - @test string(@expression(model, 2^x)) == "(2.0 ^ x)" - @test string(@expression(model, x^x)) == "(x ^ x)" - @test string(@expression(model, sin(x)^2)) == "(sin(x) ^ 2.0)" - @test string(@expression(model, sin(x)^2.0)) == "(sin(x) ^ 2.0)" - @test string(@expression(model, 2 * sin(x)^2.0)) == "(2.0 * (sin(x) ^ 2.0))" - @test string(@expression(model, 1 + sin(x))) == "(1.0 + sin(x))" - @test string(@expression(model, 1 + 2 * sin(x))) == "(1.0 + (2.0 * sin(x)))" + @test string(@expression(model, ifelse(x >= 0, x, 0))) == + "ifelse(x >= 0, x, 0)" + @test string(@expression(model, 2^x)) == "2.0 ^ x" + @test string(@expression(model, x^x)) == "x ^ x" + @test string(@expression(model, sin(x)^2)) == "sin(x) ^ 2.0" + @test string(@expression(model, sin(x)^2.0)) == "sin(x) ^ 2.0" + @test string(@expression(model, 2 * sin(x)^2.0)) == "2.0 * (sin(x) ^ 2.0)" + @test string(@expression(model, 1 + sin(x))) == "1.0 + sin(x)" + @test string(@expression(model, 1 + 2 * sin(x))) == "1.0 + (2.0 * sin(x))" @test string(@expression(model, 2.0 * sin(x)^2 + cos(x) / x)) == - "((2.0 * (sin(x) ^ 2.0)) + (cos(x) / x))" + "(2.0 * (sin(x) ^ 2.0)) + (cos(x) / x)" @test string(@expression(model, 2.0 * sin(x)^2 - cos(x) / x)) == - "((2.0 * (sin(x) ^ 2.0)) - (cos(x) / x))" + "(2.0 * (sin(x) ^ 2.0)) - (cos(x) / x)" return end @@ -90,32 +92,32 @@ function test_extension_flatten_nary( ) model = ModelType() @variable(model, x) - expr_plus = NonlinearExpr{VariableRefType}(:+, Any[x]) - expr_mult = NonlinearExpr{VariableRefType}(:*, Any[x]) - expr_sin = NonlinearExpr{VariableRefType}(:sin, Any[x]) + expr_plus = GenericNonlinearExpr{VariableRefType}(:+, Any[x]) + expr_mult = GenericNonlinearExpr{VariableRefType}(:*, Any[x]) + expr_sin = GenericNonlinearExpr{VariableRefType}(:sin, Any[x]) to_string(x) = string(flatten(x)) - @test to_string(+(expr_plus, 1)) == "(x + 1.0)" - @test to_string(+(1, expr_plus)) == "(1.0 + x)" - @test to_string(+(expr_plus, x)) == "(x + x)" - @test to_string(+(expr_sin, x)) == "(sin(x) + x)" - @test to_string(+(x, expr_plus)) == "(x + x)" - @test to_string(+(x, expr_sin)) == "(x + sin(x))" - @test to_string(+(expr_plus, expr_plus)) == "(x + x)" - @test to_string(+(expr_plus, expr_sin)) == "(x + sin(x))" - @test to_string(+(expr_sin, expr_plus)) == "(sin(x) + x)" - @test to_string(+(expr_sin, expr_sin)) == "(sin(x) + sin(x))" - @test to_string(*(expr_mult, 2)) == "(x * 2.0)" - @test to_string(*(2, expr_mult)) == "(2.0 * x)" - @test to_string(*(expr_mult, x)) == "(x * x)" - @test to_string(*(expr_sin, x)) == "(sin(x) * x)" - @test to_string(*(x, expr_mult)) == "(x * x)" - @test to_string(*(x, expr_sin)) == "(x * sin(x))" - @test to_string(*(expr_mult, expr_mult)) == "(x * x)" - @test to_string(*(expr_mult, expr_sin)) == "(x * sin(x))" - @test to_string(*(expr_sin, expr_mult)) == "(sin(x) * x)" - @test to_string(*(expr_sin, expr_sin)) == "(sin(x) * sin(x))" - @test to_string(sin(+(expr_plus, 1))) == "sin((x + 1.0))" - @test to_string(sin(*(expr_mult, expr_mult))) == "sin((x * x))" + @test to_string(+(expr_plus, 1)) == "x + 1.0" + @test 
to_string(+(1, expr_plus)) == "1.0 + x" + @test to_string(+(expr_plus, x)) == "x + x" + @test to_string(+(expr_sin, x)) == "sin(x) + x" + @test to_string(+(x, expr_plus)) == "x + x" + @test to_string(+(x, expr_sin)) == "x + sin(x)" + @test to_string(+(expr_plus, expr_plus)) == "x + x" + @test to_string(+(expr_plus, expr_sin)) == "x + sin(x)" + @test to_string(+(expr_sin, expr_plus)) == "sin(x) + x" + @test to_string(+(expr_sin, expr_sin)) == "sin(x) + sin(x)" + @test to_string(*(expr_mult, 2)) == "x * 2.0" + @test to_string(*(2, expr_mult)) == "2.0 * x" + @test to_string(*(expr_mult, x)) == "x * x" + @test to_string(*(expr_sin, x)) == "sin(x) * x" + @test to_string(*(x, expr_mult)) == "x * x" + @test to_string(*(x, expr_sin)) == "x * sin(x)" + @test to_string(*(expr_mult, expr_mult)) == "x * x" + @test to_string(*(expr_mult, expr_sin)) == "x * sin(x)" + @test to_string(*(expr_sin, expr_mult)) == "sin(x) * x" + @test to_string(*(expr_sin, expr_sin)) == "sin(x) * sin(x)" + @test to_string(sin(+(expr_plus, 1))) == "sin(x + 1.0)" + @test to_string(sin(*(expr_mult, expr_mult))) == "sin(x * x)" return end @@ -123,8 +125,8 @@ function test_extension_zero_one( ModelType = Model, VariableRefType = VariableRef, ) - @test string(zero(NonlinearExpr{VariableRefType})) == "+(0.0)" - @test string(one(NonlinearExpr{VariableRefType})) == "+(1.0)" + @test string(zero(GenericNonlinearExpr{VariableRefType})) == "+(0.0)" + @test string(one(GenericNonlinearExpr{VariableRefType})) == "+(1.0)" return end @@ -146,16 +148,16 @@ function test_extension_expression_addmul( ) model = ModelType() @variable(model, x) - @test string(@expression(model, x + 3 * sin(x))) == "(x + (3.0 * sin(x)))" + @test string(@expression(model, x + 3 * sin(x))) == "x + (3.0 * sin(x))" @test string(@expression(model, 2 * x + 3 * sin(x))) == - "(2 x + (3.0 * sin(x)))" + "(2 x) + (3.0 * sin(x))" @test string(@expression(model, x^2 + 3 * sin(x))) == - "($(x^2) + (3.0 * sin(x)))" + "($(x^2)) + (3.0 * sin(x))" @test string(@expression(model, sin(x) + 3 * sin(x))) == - "(sin(x) + (3.0 * sin(x)))" - @test string(@expression(model, sin(x) + 3 * x)) == "(sin(x) + 3 x)" + "sin(x) + (3.0 * sin(x))" + @test string(@expression(model, sin(x) + 3 * x)) == "sin(x) + (3 x)" @test string(@expression(model, sin(x) + 3 * x * x)) == - "(sin(x) + 3 $(x^2))" + "sin(x) + (3 $(x^2))" return end @@ -165,16 +167,16 @@ function test_extension_expression_submul( ) model = ModelType() @variable(model, x) - @test string(@expression(model, x - 3 * sin(x))) == "(x - (3.0 * sin(x)))" + @test string(@expression(model, x - 3 * sin(x))) == "x - (3.0 * sin(x))" @test string(@expression(model, 2 * x - 3 * sin(x))) == - "(2 x - (3.0 * sin(x)))" + "(2 x) - (3.0 * sin(x))" @test string(@expression(model, x^2 - 3 * sin(x))) == - "($(x^2) - (3.0 * sin(x)))" + "($(x^2)) - (3.0 * sin(x))" @test string(@expression(model, sin(x) - 3 * sin(x))) == - "(sin(x) - (3.0 * sin(x)))" - @test string(@expression(model, sin(x) - 3 * x)) == "(sin(x) - 3 x)" + "sin(x) - (3.0 * sin(x))" + @test string(@expression(model, sin(x) - 3 * x)) == "sin(x) - (3 x)" @test string(@expression(model, sin(x) - 3 * x * x)) == - "(sin(x) - 3 $(x^2))" + "sin(x) - (3 $(x^2))" return end @@ -184,12 +186,12 @@ function test_extension_aff_expr_convert( ) model = ModelType() @variable(model, x) - _to_string(x) = string(convert(NonlinearExpr{VariableRefType}, x)) + _to_string(x) = string(convert(GenericNonlinearExpr{VariableRefType}, x)) @test _to_string(AffExpr(0.0)) == "0.0" @test _to_string(AffExpr(1.0)) == "1.0" - @test 
_to_string(x + 1) == "(x + 1.0)" - @test _to_string(2x + 1) == "((2.0 * x) + 1.0)" - @test _to_string(2x) == "(2.0 * x)" + @test _to_string(x + 1) == "x + 1.0" + @test _to_string(2x + 1) == "(2.0 * x) + 1.0" + @test _to_string(2x) == "2.0 * x" return end @@ -199,18 +201,18 @@ function test_extension_quad_expr_convert( ) model = ModelType() @variable(model, x) - _to_string(x) = string(convert(NonlinearExpr{VariableRefType}, x)) + _to_string(x) = string(convert(GenericNonlinearExpr{VariableRefType}, x)) @test _to_string(QuadExpr(AffExpr(0.0))) == "0.0" @test _to_string(QuadExpr(AffExpr(1.0))) == "1.0" - @test _to_string(x^2 + 1) == "((x * x) + 1.0)" - @test _to_string(2x^2 + 1) == "((2.0 * x * x) + 1.0)" - @test _to_string(2x^2) == "(2.0 * x * x)" - @test _to_string(x^2 + x + 1) == "(x + (x * x) + 1.0)" - @test _to_string(2x^2 + x + 1) == "(x + (2.0 * x * x) + 1.0)" - @test _to_string(2x^2 + x) == "(x + (2.0 * x * x))" - @test _to_string(x^2 + 2x + 1) == "((2.0 * x) + (x * x) + 1.0)" - @test _to_string(2x^2 + 2x + 1) == "((2.0 * x) + (2.0 * x * x) + 1.0)" - @test _to_string(2x^2 + 2x) == "((2.0 * x) + (2.0 * x * x))" + @test _to_string(x^2 + 1) == "(x * x) + 1.0" + @test _to_string(2x^2 + 1) == "(2.0 * x * x) + 1.0" + @test _to_string(2x^2) == "2.0 * x * x" + @test _to_string(x^2 + x + 1) == "x + (x * x) + 1.0" + @test _to_string(2x^2 + x + 1) == "x + (2.0 * x * x) + 1.0" + @test _to_string(2x^2 + x) == "x + (2.0 * x * x)" + @test _to_string(x^2 + 2x + 1) == "(2.0 * x) + (x * x) + 1.0" + @test _to_string(2x^2 + 2x + 1) == "(2.0 * x) + (2.0 * x * x) + 1.0" + @test _to_string(2x^2 + 2x) == "(2.0 * x) + (2.0 * x * x)" return end @@ -237,7 +239,7 @@ function test_extension_constraint_lessthan( @constraint(model, c, 2.0 * sin(x)^2 + cos(x) / x <= 1) obj = constraint_object(c) @test isequal_canonical(obj.func, 2.0 * sin(x)^2 + cos(x) / x - 1) - @test obj.set == MOI.LessThan(0.0) + @test obj.set == MOI.LessThan(zero(value_type(ModelType))) return end @@ -250,7 +252,7 @@ function test_extension_constraint_greaterthan( @constraint(model, c, 2.0 * sin(x)^2 + cos(x) / x >= 1) obj = constraint_object(c) @test isequal_canonical(obj.func, 2.0 * sin(x)^2 + cos(x) / x - 1) - @test obj.set == MOI.GreaterThan(0.0) + @test obj.set == MOI.GreaterThan(zero(value_type(ModelType))) return end @@ -263,7 +265,7 @@ function test_extension_constraint_equalto( @constraint(model, c, 2.0 * sin(x)^2 + cos(x) / x == 1) obj = constraint_object(c) @test isequal_canonical(obj.func, 2.0 * sin(x)^2 + cos(x) / x - 1) - @test obj.set == MOI.EqualTo(0.0) + @test obj.set == MOI.EqualTo(zero(value_type(ModelType))) return end @@ -276,7 +278,8 @@ function test_extension_constraint_interval( @constraint(model, c, 0 <= 2.0 * sin(x)^2 + cos(x) / x <= 1) obj = constraint_object(c) @test isequal_canonical(obj.func, 2.0 * sin(x)^2 + cos(x) / x) - @test obj.set == MOI.Interval(0.0, 1.0) + T = value_type(ModelType) + @test obj.set == MOI.Interval(zero(T), one(T)) return end @@ -284,10 +287,11 @@ function test_user_defined_function_overload() model = Model() @variable(model, x) f(x::Real) = x^2 - f(x::AbstractJuMPScalar) = NonlinearExpr{VariableRef}(:f, x) + f(x::AbstractJuMPScalar) = NonlinearExpr(:f, x) register(model, :f, 1, f; autodiff = true) @test string(@expression(model, f(x))) == "f(x)" - @test string(f(x) + f(x)) == "(f(x) + f(x))" + @test string(f(x) + f(x)) == "f(x) + f(x)" + @test string(1 / (f(x) + f(x))) == "1.0 / (f(x) + f(x))" return end @@ -298,7 +302,7 @@ function test_extension_nonlinear_matrix_algebra( model = ModelType() 
@variable(model, X[1:3, 1:3], Symmetric) @objective(model, Max, sum(X^4 .- X^3)) - @test objective_function(model) isa NonlinearExpr{VariableRefType} + @test objective_function(model) isa GenericNonlinearExpr{VariableRefType} return end @@ -316,7 +320,7 @@ function test_extension_recursion_stackoverflow( for _ in 1:20_000 expr = sin(expr) end - @test @objective(model, Min, expr) isa NonlinearExpr{VariableRefType} + @test @objective(model, Min, expr) isa GenericNonlinearExpr{VariableRefType} @test string(expr) isa String return end @@ -326,8 +330,8 @@ function test_nlparameter_interaction() @variable(model, x) @NLparameter(model, p == 1) e = x + p - @test e isa NonlinearExpr - @test string(e) == "(x + $p)" + @test e isa GenericNonlinearExpr + @test string(e) == "x + ($p)" return end @@ -336,8 +340,8 @@ function test_nlexpression_interaction() @variable(model, x) @NLexpression(model, expr, sin(x)) e = x + expr - @test e isa NonlinearExpr - @test string(e) == "(x + $expr)" + @test e isa GenericNonlinearExpr + @test string(e) == "x + ($expr)" return end @@ -371,7 +375,7 @@ function test_jump_function_nonlinearexpr() @NLexpression(model, expr1, sin(p + x)) @NLexpression(model, expr2, sin(expr1)) nlp = nonlinear_model(model) - @test string(jump_function(model, nlp[index(expr1)])) == "sin(($p + $x))" + @test string(jump_function(model, nlp[index(expr1)])) == "sin(($p) + $x)" @test string(jump_function(model, nlp[index(expr2)])) == "sin($expr1)" return end @@ -402,7 +406,7 @@ function test_extension_expr_mle( sum((data[i] - x)^2 for i in 1:n) / (2 * y^2) ) @test string(obj) == - "((2.0 * log((1.0 / 2 $(y^2)))) - (4 $(x^2) - 30 x + 85 / 2 $(y^2)))" + "(2.0 * log(1.0 / (2 $(y^2)))) - ((4 $(x^2) - 30 x + 85) / (2 $(y^2)))" return end @@ -414,46 +418,49 @@ function test_extension_nl_macro( @variable(model, x) @test isequal_canonical( @expression(model, ifelse(x, 1, 2)), - NonlinearExpr(:ifelse, Any[x, 1, 2]), + GenericNonlinearExpr(:ifelse, Any[x, 1, 2]), ) @test isequal_canonical( @expression(model, x || 1), - NonlinearExpr(:||, Any[x, 1]), + GenericNonlinearExpr(:||, Any[x, 1]), ) @test isequal_canonical( @expression(model, x && 1), - NonlinearExpr(:&&, Any[x, 1]), + GenericNonlinearExpr(:&&, Any[x, 1]), ) @test isequal_canonical( @expression(model, x < 0), - NonlinearExpr(:<, Any[x, 0]), + GenericNonlinearExpr(:<, Any[x, 0]), ) @test isequal_canonical( @expression(model, x > 0), - NonlinearExpr(:>, Any[x, 0]), + GenericNonlinearExpr(:>, Any[x, 0]), ) @test isequal_canonical( @expression(model, x <= 0), - NonlinearExpr(:<=, Any[x, 0]), + GenericNonlinearExpr(:<=, Any[x, 0]), ) @test isequal_canonical( @expression(model, x >= 0), - NonlinearExpr(:>=, Any[x, 0]), + GenericNonlinearExpr(:>=, Any[x, 0]), ) @test isequal_canonical( @expression(model, x == 0), - NonlinearExpr(:(==), Any[x, 0]), + GenericNonlinearExpr(:(==), Any[x, 0]), ) @test isequal_canonical( @expression(model, 0 < x <= 1), - NonlinearExpr( + GenericNonlinearExpr( :&&, Any[@expression(model, 0 < x), @expression(model, x <= 1)], ), ) @test isequal_canonical( @expression(model, ifelse(x > 0, x^2, sin(x))), - NonlinearExpr(:ifelse, Any[@expression(model, x > 0), x^2, sin(x)]), + GenericNonlinearExpr( + :ifelse, + Any[@expression(model, x > 0), x^2, sin(x)], + ), ) return end @@ -463,7 +470,7 @@ function test_register_univariate() @variable(model, x) @register(model, f, 1, x -> x^2) @test isequal_canonical(@expression(model, f(x)), f(x)) - @test isequal_canonical(f(x), NonlinearExpr(:f, Any[x])) + @test isequal_canonical(f(x), 
GenericNonlinearExpr(:f, Any[x])) attrs = MOI.get(model, MOI.ListOfModelAttributesSet()) @test MOI.UserDefinedFunction(:f, 1) in attrs return @@ -474,7 +481,7 @@ function test_register_univariate_gradient() @variable(model, x) @register(model, f, 1, x -> x^2, x -> 2 * x) @test isequal_canonical(@expression(model, f(x)), f(x)) - @test isequal_canonical(f(x), NonlinearExpr(:f, Any[x])) + @test isequal_canonical(f(x), GenericNonlinearExpr(:f, Any[x])) attrs = MOI.get(model, MOI.ListOfModelAttributesSet()) @test MOI.UserDefinedFunction(:f, 1) in attrs return @@ -485,7 +492,7 @@ function test_register_univariate_gradient_hessian() @variable(model, x) @register(model, f, 1, x -> x^2, x -> 2 * x, x -> 2.0) @test isequal_canonical(@expression(model, f(x)), f(x)) - @test isequal_canonical(f(x), NonlinearExpr(:f, Any[x])) + @test isequal_canonical(f(x), GenericNonlinearExpr(:f, Any[x])) attrs = MOI.get(model, MOI.ListOfModelAttributesSet()) @test MOI.UserDefinedFunction(:f, 1) in attrs return @@ -497,7 +504,7 @@ function test_register_multivariate_() f = (x...) -> sum(x .^ 2) @register(model, foo, 2, f) @test isequal_canonical(@expression(model, foo(x...)), foo(x...)) - @test isequal_canonical(foo(x...), NonlinearExpr(:foo, Any[x...])) + @test isequal_canonical(foo(x...), GenericNonlinearExpr(:foo, Any[x...])) attrs = MOI.get(model, MOI.ListOfModelAttributesSet()) @test MOI.UserDefinedFunction(:foo, 2) in attrs return @@ -510,7 +517,7 @@ function test_register_multivariate_gradient() ∇f = (g, x...) -> (g .= 2 .* x) @register(model, foo, 2, f, ∇f) @test isequal_canonical(@expression(model, foo(x...)), foo(x...)) - @test isequal_canonical(foo(x...), NonlinearExpr(:foo, Any[x...])) + @test isequal_canonical(foo(x...), GenericNonlinearExpr(:foo, Any[x...])) attrs = MOI.get(model, MOI.ListOfModelAttributesSet()) @test MOI.UserDefinedFunction(:foo, 2) in attrs return @@ -528,7 +535,7 @@ function test_register_multivariate_gradient_hessian() end @register(model, foo, 2, f, ∇f, ∇²f) @test isequal_canonical(@expression(model, foo(x...)), foo(x...)) - @test isequal_canonical(foo(x...), NonlinearExpr(:foo, Any[x...])) + @test isequal_canonical(foo(x...), GenericNonlinearExpr(:foo, Any[x...])) attrs = MOI.get(model, MOI.ListOfModelAttributesSet()) @test MOI.UserDefinedFunction(:foo, 2) in attrs return @@ -555,7 +562,7 @@ function test_expression_no_variable() "Unable to create a nonlinear expression because it did not " * "contain any JuMP scalars. head = $head, args = $args.", ), - NonlinearExpr(head, args), + GenericNonlinearExpr(head, args), ) return end @@ -595,13 +602,66 @@ function test_value_expression() return end +function test_value_result() + model = Model() do + return MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}()) + end + @variable(model, x) + optimize!(model) + mock = unsafe_backend(model) + MOI.set(mock, MOI.TerminationStatus(), MOI.OPTIMAL) + MOI.set(mock, MOI.ResultCount(), 2) + MOI.set(mock, MOI.PrimalStatus(1), MOI.FEASIBLE_POINT) + MOI.set(mock, MOI.PrimalStatus(2), MOI.FEASIBLE_POINT) + MOI.set(mock, MOI.VariablePrimal(1), optimizer_index(x), 1.1) + MOI.set(mock, MOI.VariablePrimal(2), optimizer_index(x), 2.2) + f = sin(x) + @test value(f; result = 1) ≈ sin(1.1) + @test value(f; result = 2) ≈ sin(2.2) + return +end + +function test_nonlinear_expr_owner_model() + model = Model() + @variable(model, x) + f = GenericNonlinearExpr(:sin, Any[x]) + # This shouldn't happen in regular code, but let's test against it to check + # we get something similar to AffExpr and QuadExpr. 
+ empty!(f.args) + @test owner_model(f) === nothing + return +end + +function test_operate_shortcut_ma_operate!!_add_mul() + model = Model() + @variable(model, x) + @expression(model, sum(sin(x) for i in 1:3)) + return +end + function test_show_nonlinear_model() model = Model() @variable(model, x >= -1) @objective(model, Min, exp(x)) @constraint(model, sin(x) <= 0) str = sprint(show, model) - @test occursin("NonlinearExpr{", str) + @test occursin("NonlinearExpr", str) + return +end + +function test_error_both_nl_interfaces() + model = Model() + @variable(model, x) + @constraint(model, log(x) <= 1) + @NLconstraint(model, log(x) <= 1) + @test_throws( + ErrorException( + "Cannot optimize a model which contains the features from both " * + "the legacy and new nonlinear interfaces. You must use one or " * + "the other.", + ), + optimize!(model), + ) return end diff --git a/test/test_operator.jl b/test/test_operator.jl index 1b395b6866e..3903c3818e0 100644 --- a/test/test_operator.jl +++ b/test/test_operator.jl @@ -106,7 +106,7 @@ function test_extension_broadcast_division_error( copy(B.rowval), vec(x), ) - NonlinearExprType = NonlinearExpr{VariableRefType} + NonlinearExprType = GenericNonlinearExpr{VariableRefType} @test A ./ x isa Matrix{NonlinearExprType} @test B ./ x isa SparseArrays.SparseMatrixCSC{NonlinearExprType,Int} @test A ./ y isa SparseArrays.SparseMatrixCSC{NonlinearExprType,Int} @@ -336,17 +336,17 @@ function test_extension_basic_operators_number( @test_expression_with_string 4.13 + w "w + 4.13" @test_expression_with_string 3.16 - w "-w + 3.16" @test_expression_with_string 5.23 * w "5.23 w" - @test_expression_with_string 2.94 / w "(2.94 / w)" + @test_expression_with_string 2.94 / w "2.94 / w" # 1-3 Number--AffExpr @test_expression_with_string 1.5 + aff "7.1 x + 4" @test_expression_with_string 1.5 - aff "-7.1 x - 1" @test_expression_with_string 2 * aff "14.2 x + 5" - @test_expression_with_string 2 / aff "(2.0 / 7.1 x + 2.5)" + @test_expression_with_string 2 / aff "2.0 / (7.1 x + 2.5)" # 1-4 Number--QuadExpr @test_expression_with_string 1.5 + q "2.5 y*z + 7.1 x + 4" @test_expression_with_string 1.5 - q "-2.5 y*z - 7.1 x - 1" @test_expression_with_string 2 * q "5 y*z + 14.2 x + 5" - @test_expression_with_string 2 / q "(2.0 / 2.5 y*z + 7.1 x + 2.5)" + @test_expression_with_string 2 / q "2.0 / (2.5 y*z + 7.1 x + 2.5)" return end @@ -377,27 +377,27 @@ function test_extension_basic_operators_variable( @test_expression_with_string(x^2, "x²", interrable = false) @test_expression_with_string(x^1, "x", interrable = false) @test_expression_with_string(x^0, "1", interrable = false) - @test_expression_with_string(x^3, "(x ^ 3)", interrable = false) - @test_expression_with_string x^(T(15) / T(10)) "(x ^ 1.5)" + @test_expression_with_string(x^3, "x ^ 3", interrable = false) + @test_expression_with_string x^(T(15) / T(10)) "x ^ 1.5" # 2-2 Variable--Variable @test_expression_with_string w + x "w + x" @test_expression_with_string w - x "w - x" @test_expression_with_string w * x "w*x" @test_expression_with_string x - x "0" - @test_expression_with_string w / x "(w / x)" + @test_expression_with_string w / x "w / x" @test_expression_with_string y * z - x "y*z - x" # 2-3 Variable--AffExpr @test_expression_with_string z + aff "z + 7.1 x + 2.5" @test_expression_with_string z - aff "z - 7.1 x - 2.5" @test_expression_with_string z * aff "7.1 z*x + 2.5 z" - @test_expression_with_string z / aff "(z / 7.1 x + 2.5)" + @test_expression_with_string z / aff "z / (7.1 x + 2.5)" @test_throws MethodError z ≤ aff 
@test_expression_with_string β * x - aff "0 x - 2.5" # 2-4 Variable--QuadExpr @test_expression_with_string w + q "2.5 y*z + w + 7.1 x + 2.5" @test_expression_with_string w - q "-2.5 y*z + w - 7.1 x - 2.5" - @test_expression_with_string w * q "(w * 2.5 y*z + 7.1 x + 2.5)" - @test_expression_with_string w / q "(w / 2.5 y*z + 7.1 x + 2.5)" + @test_expression_with_string w * q "w * (2.5 y*z + 7.1 x + 2.5)" + @test_expression_with_string w / q "w / (2.5 y*z + 7.1 x + 2.5)" @test transpose(x) === x @test conj(x) === x return @@ -446,32 +446,32 @@ function test_extension_basic_operators_affexpr( ) @test_expression_with_string(aff^0, "1", inferrable = false) @test_expression_with_string((7.1 * x + 2.5)^0, "1", inferrable = false) - @test_expression_with_string(aff^3, "(7.1 x + 2.5 ^ 3)", inferrable = false) + @test_expression_with_string(aff^3, "(7.1 x + 2.5) ^ 3", inferrable = false) @test_expression_with_string( (7.1 * x + 2.5)^3, - "(7.1 x + 2.5 ^ 3)", + "(7.1 x + 2.5) ^ 3", inferrable = false ) - @test_expression_with_string aff^1.5 "(7.1 x + 2.5 ^ 1.5)" - @test_expression_with_string (7.1 * x + 2.5)^1.5 "(7.1 x + 2.5 ^ 1.5)" + @test_expression_with_string aff^1.5 "(7.1 x + 2.5) ^ 1.5" + @test_expression_with_string (7.1 * x + 2.5)^1.5 "(7.1 x + 2.5) ^ 1.5" # 3-2 AffExpr--Variable @test_expression_with_string aff + z "7.1 x + z + 2.5" @test_expression_with_string aff - z "7.1 x - z + 2.5" @test_expression_with_string aff * z "7.1 x*z + 2.5 z" - @test_expression_with_string aff / z "(7.1 x + 2.5 / z)" + @test_expression_with_string aff / z "(7.1 x + 2.5) / z" @test_expression_with_string aff - 7.1 * x "0 x + 2.5" # 3-3 AffExpr--AffExpr @test_expression_with_string aff + aff2 "7.1 x + 1.2 y + 3.7" @test_expression_with_string aff - aff2 "7.1 x - 1.2 y + 1.3" @test_expression_with_string aff * aff2 "8.52 x*y + 3 y + 8.52 x + 3" @test string((x + x) * (x + 3)) == string((x + 3) * (x + x)) # Issue #288 - @test_expression_with_string aff / aff2 "(7.1 x + 2.5 / 1.2 y + 1.2)" + @test_expression_with_string aff / aff2 "(7.1 x + 2.5) / (1.2 y + 1.2)" @test_expression_with_string aff - aff "0 x" # 4-4 AffExpr--QuadExpr @test_expression_with_string aff2 + q "2.5 y*z + 1.2 y + 7.1 x + 3.7" @test_expression_with_string aff2 - q "-2.5 y*z + 1.2 y - 7.1 x - 1.3" - @test_expression_with_string aff2 * q "(1.2 y + 1.2 * 2.5 y*z + 7.1 x + 2.5)" - @test_expression_with_string aff2 / q "(1.2 y + 1.2 / 2.5 y*z + 7.1 x + 2.5)" + @test_expression_with_string aff2 * q "(1.2 y + 1.2) * (2.5 y*z + 7.1 x + 2.5)" + @test_expression_with_string aff2 / q "(1.2 y + 1.2) / (2.5 y*z + 7.1 x + 2.5)" @test transpose(aff) === aff @test conj(aff) === aff return @@ -503,18 +503,18 @@ function test_extension_basic_operators_quadexpr( # 4-2 QuadExpr--Variable @test_expression_with_string q + w "2.5 y*z + 7.1 x + w + 2.5" @test_expression_with_string q - w "2.5 y*z + 7.1 x - w + 2.5" - @test_expression_with_string q * w "(2.5 y*z + 7.1 x + 2.5 * w)" - @test_expression_with_string q / w "(2.5 y*z + 7.1 x + 2.5 / w)" + @test_expression_with_string q * w "(2.5 y*z + 7.1 x + 2.5) * w" + @test_expression_with_string q / w "(2.5 y*z + 7.1 x + 2.5) / w" # 4-3 QuadExpr--AffExpr @test_expression_with_string q + aff2 "2.5 y*z + 7.1 x + 1.2 y + 3.7" @test_expression_with_string q - aff2 "2.5 y*z + 7.1 x - 1.2 y + 1.3" - @test_expression_with_string q * aff2 "(2.5 y*z + 7.1 x + 2.5 * 1.2 y + 1.2)" - @test_expression_with_string q / aff2 "(2.5 y*z + 7.1 x + 2.5 / 1.2 y + 1.2)" + @test_expression_with_string q * aff2 "(2.5 y*z + 7.1 x + 2.5) * (1.2 
y + 1.2)" + @test_expression_with_string q / aff2 "(2.5 y*z + 7.1 x + 2.5) / (1.2 y + 1.2)" # 4-4 QuadExpr--QuadExpr @test_expression_with_string q + q2 "2.5 y*z + 8 x*z + 7.1 x + 1.2 y + 3.7" @test_expression_with_string q - q2 "2.5 y*z - 8 x*z + 7.1 x - 1.2 y + 1.3" - @test_expression_with_string q * q2 "(2.5 y*z + 7.1 x + 2.5 * 8 x*z + 1.2 y + 1.2)" - @test_expression_with_string q / q2 "(2.5 y*z + 7.1 x + 2.5 / 8 x*z + 1.2 y + 1.2)" + @test_expression_with_string q * q2 "(2.5 y*z + 7.1 x + 2.5) * (8 x*z + 1.2 y + 1.2)" + @test_expression_with_string q / q2 "(2.5 y*z + 7.1 x + 2.5) / (8 x*z + 1.2 y + 1.2)" @test transpose(q) === q @test conj(q) === q return @@ -621,7 +621,7 @@ function test_complex_pow() @test y^0 == (1.0 + 0im) @test y^1 == 0 * y * y + y @test y^2 == y * y - @test isequal_canonical(y^3, NonlinearExpr(:^, Any[y, 3])) + @test isequal_canonical(y^3, GenericNonlinearExpr(:^, Any[y, 3])) return end From 4126ffb71ec682e19241b8f787cd0dc547439721 Mon Sep 17 00:00:00 2001 From: Oscar Dowson Date: Wed, 16 Aug 2023 21:27:35 +1200 Subject: [PATCH 07/23] Add support for VectorNonlinearFunction (#3450) Update --- docs/Project.toml | 2 + docs/make.jl | 1 + docs/src/manual/nonlinear.md | 4 +- .../tutorials/nonlinear/complementarity.jl | 121 ++++++++++++++++++ src/nlp_expr.jl | 87 +++++++++---- src/optimizer_interface.jl | 5 +- test/test_nlp_expr.jl | 56 +++++++- 7 files changed, 243 insertions(+), 33 deletions(-) create mode 100644 docs/src/tutorials/nonlinear/complementarity.jl diff --git a/docs/Project.toml b/docs/Project.toml index 9f864d9f940..f12a5c0a32b 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -20,6 +20,7 @@ Markdown = "d6f4376e-aef5-505a-96c1-9c027394607a" MarkdownAST = "d0879d2d-cac2-40c8-9cee-1863dc0c7391" MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" MultiObjectiveAlgorithms = "0327d340-17cd-11ea-3e99-2fd5d98cecda" +PATHSolver = "f5f7c340-0bb3-5c69-969a-41884d311d1b" Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" @@ -48,6 +49,7 @@ JSONSchema = "1" Literate = "2.8" MathOptInterface = "=1.19.0" MultiObjectiveAlgorithms = "=1.2.0" +PATHSolver = "=1.6.0" Plots = "1" SCS = "=1.3.0" SQLite = "1" diff --git a/docs/make.jl b/docs/make.jl index b6cccf0ec0f..fc89f566ef3 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -308,6 +308,7 @@ const _PAGES = [ "tutorials/nonlinear/user_defined_hessians.md", "tutorials/nonlinear/nested_problems.md", "tutorials/nonlinear/querying_hessians.md", + "tutorials/nonlinear/complementarity.md", ], "Conic programs" => [ "tutorials/conic/introduction.md", diff --git a/docs/src/manual/nonlinear.md b/docs/src/manual/nonlinear.md index 2a2d7ec563d..36cddccd8c9 100644 --- a/docs/src/manual/nonlinear.md +++ b/docs/src/manual/nonlinear.md @@ -60,9 +60,7 @@ julia> delete(model, con[1]) ## Create a nonlinear expression -Use [`@expression`](@ref) to create nonlinear expression objects. The syntax -is identical to [`@expression`](@ref), except that the expression can contain -nonlinear terms. 
+Use [`@expression`](@ref) to create nonlinear expression objects:
 
 ```jldoctest nl_expression
 julia> model = Model();
diff --git a/docs/src/tutorials/nonlinear/complementarity.jl b/docs/src/tutorials/nonlinear/complementarity.jl
new file mode 100644
index 00000000000..58674491bfd
--- /dev/null
+++ b/docs/src/tutorials/nonlinear/complementarity.jl
@@ -0,0 +1,121 @@
+# Copyright 2017, Iain Dunning, Joey Huchette, Miles Lubin, and contributors #src
+# This Source Code Form is subject to the terms of the Mozilla Public License #src
+# v.2.0. If a copy of the MPL was not distributed with this file, You can #src
+# obtain one at https://mozilla.org/MPL/2.0/. #src
+
+# # Mixed complementarity problems
+
+# This tutorial is a collection of examples of small mixed-complementarity
+# programs.
+
+# This tutorial uses the following packages:
+
+using JuMP
+import PATHSolver
+import Test  #src
+
+# ## Linear complementarity
+
+# Form a mixed complementarity problem using the perp symbol `⟂` (type
+# `\perp` in the REPL). See [Complementarity constraints](@ref) for the
+# definition of a complementarity constraint.
+
+M = [0 0 -1 -1; 0 0 1 -2; 1 -1 2 -2; 1 2 -2 4]
+q = [2, 2, -2, -6]
+model = Model(PATHSolver.Optimizer)
+set_silent(model)
+@variable(model, 0 <= x[1:4] <= 10, start = 0)
+@constraint(model, M * x + q ⟂ x)
+optimize!(model)
+Test.@test value.(x) ≈ [2.8, 0.0, 0.8, 1.2]  #src
+value.(x)
+
+# ## Other ways of writing linear complementarity problems
+
+# You do not need to use a single vector of variables, and the complementarity
+# constraints can be given in any order. In addition, you can either use the
+# perp symbol, or you can use the [`MOI.Complements`](@ref) set.
+
+model = Model(PATHSolver.Optimizer)
+set_silent(model)
+@variable(model, 0 <= w <= 10, start = 0)
+@variable(model, 0 <= x <= 10, start = 0)
+@variable(model, 0 <= y <= 10, start = 0)
+@variable(model, 0 <= z <= 10, start = 0)
+@constraint(model, [y - 2z + 2, x] in MOI.Complements(2))
+@constraint(model, -y - z + 2 ⟂ w)
+@constraint(model, w + 2x - 2y + 4z - 6 ⟂ z)
+@constraint(model, w - x + 2y - 2z - 2 ⟂ y)
+optimize!(model)
+Test.@test value.([w, x, y, z]) ≈ [2.8, 0.0, 0.8, 1.2]  #src
+value.([w, x, y, z])
+
+# ## Transportation
+
+# This example is a reformulation of the transportation problem from Chapter
+# 3.3 of Dantzig, G.B. (1963). _Linear Programming and Extensions_. Princeton
+# University Press, Princeton, New Jersey. It is based on the GAMS model
+# [`gamslib_transmcp`](https://www.gams.com/latest/gamslib_ml/libhtml/gamslib_transmcp.html).
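+
+# In the mixed complementarity formulation below, each constraint pairs a
+# function with its complementary variable: a route `i => j` carries flow
+# `x[i, j]` only if the plant's shadow price `w[i]` plus the transport cost
+# equals the market price `p[j]`; `w[i]` is positive only if the capacity of
+# plant `i` is binding; and `p[j]` is positive only if demand at market `j`
+# is binding.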
+
+capacity = Dict("seattle" => 350, "san-diego" => 600)
+demand = Dict("new-york" => 325, "chicago" => 300, "topeka" => 275)
+cost = Dict(
+    ("seattle" => "new-york") => 90 * 2.5 / 1_000,
+    ("seattle" => "chicago") => 90 * 1.7 / 1_000,
+    ("seattle" => "topeka") => 90 * 1.8 / 1_000,
+    ("san-diego" => "new-york") => 90 * 2.5 / 1_000,
+    ("san-diego" => "chicago") => 90 * 1.8 / 1_000,
+    ("san-diego" => "topeka") => 90 * 1.4 / 1_000,
+)
+plants, markets = keys(capacity), keys(demand)
+model = Model(PATHSolver.Optimizer)
+set_silent(model)
+@variable(model, w[i in plants] >= 0)
+@variable(model, p[j in markets] >= 0)
+@variable(model, x[i in plants, j in markets] >= 0)
+@constraints(
+    model,
+    begin
+        [i in plants, j in markets], w[i] + cost[i=>j] - p[j] ⟂ x[i, j]
+        [i in plants], capacity[i] - sum(x[i, :]) ⟂ w[i]
+        [j in markets], sum(x[:, j]) - demand[j] ⟂ p[j]
+    end
+)
+optimize!(model)
+Test.@test isapprox(value(p["new-york"]), 0.225; atol = 1e-3)  #src
+value.(p)
+
+# ## Expected utility of insurance
+
+# This example is taken from a lecture of the course AAE706, given by Thomas F.
+# Rutherford at the University of Wisconsin, Madison. It models the expected
+# coverage of insurance `K` that a rational actor would obtain to insure a risk
+# that occurs with probability `pi` and results in a loss of `L`.
+
+pi = 0.01  # Probability of a bad outcome
+L = 0.5    # Loss with a bad outcome
+γ = 0.02   # Premium for coverage
+σ = 0.5    # Elasticity
+ρ = -1     # Risk exponent
+U(C) = C^ρ / ρ
+MU(C) = C^(ρ - 1)
+model = Model(PATHSolver.Optimizer)
+set_silent(model)
+@variable(model, EU, start = 1)   # Expected utility
+@variable(model, EV, start = 1)   # Equivalent variation in income
+@variable(model, C_G, start = 1)  # Consumption on a good day
+@variable(model, C_B, start = 1)  # Consumption on a bad day
+@variable(model, K, start = 1)    # Coverage
+@constraints(
+    model,
+    begin
+        (1 - pi) * U(C_G) + pi * U(C_B) - EU ⟂ EU
+        100 * (((1 - pi) * C_G^ρ + pi * C_B^ρ)^(1 / ρ) - 1) - EV ⟂ EV
+        1 - γ * K - C_G ⟂ C_G
+        1 - L + (1 - γ) * K - C_B ⟂ C_B
+        γ * ((1 - pi) * MU(C_G) + pi * MU(C_B)) - pi * MU(C_B) ⟂ K
+    end
+)
+optimize!(model)
+Test.@test isapprox(value(C_G), 0.996; atol = 1e-3)  #src
+value(K)
diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl
index 0dfd6c54b91..baee9fca348 100644
--- a/src/nlp_expr.jl
+++ b/src/nlp_expr.jl
@@ -118,16 +118,13 @@ function GenericNonlinearExpr{V}(
     return GenericNonlinearExpr{V}(head, Any[args...])
 end
 
-Base.length(x::GenericNonlinearExpr) = length(x.args)
-Base.getindex(x::GenericNonlinearExpr, i::Int) = x.args[i]
-
 const _PREFIX_OPERATORS =
     (:+, :-, :*, :/, :^, :||, :&&, :>, :<, :(<=), :(>=), :(==))
 
 _needs_parentheses(::Union{Number,AbstractVariableRef}) = false
 _needs_parentheses(::Any) = true
 function _needs_parentheses(x::GenericNonlinearExpr)
-    return x.head in _PREFIX_OPERATORS && length(x) > 1
+    return x.head in _PREFIX_OPERATORS && length(x.args) > 1
 end
 
 function function_string(::MIME"text/plain", x::GenericNonlinearExpr)
@@ -135,15 +132,15 @@ function function_string(::MIME"text/plain", x::GenericNonlinearExpr)
     while !isempty(stack)
         arg = pop!(stack)
         if arg isa GenericNonlinearExpr
-            if arg.head in _PREFIX_OPERATORS && length(arg) > 1
-                if _needs_parentheses(arg[1])
+            if arg.head in _PREFIX_OPERATORS && length(arg.args) > 1
+                if _needs_parentheses(arg.args[1])
                     print(io, "(")
                 end
                 if _needs_parentheses(arg.args[end])
                     push!(stack, ")")
                 end
-                for i in length(arg):-1:2
-                    push!(stack, arg[i])
+                for i in length(arg.args):-1:2
+                    push!(stack, arg.args[i])
+                    if 
_needs_parentheses(arg.args[i]) push!(stack, "(") end @@ -152,15 +149,15 @@ function function_string(::MIME"text/plain", x::GenericNonlinearExpr) push!(stack, ")") end end - push!(stack, arg[1]) + push!(stack, arg.args[1]) else print(io, arg.head, "(") push!(stack, ")") - for i in length(arg):-1:2 - push!(stack, arg[i]) + for i in length(arg.args):-1:2 + push!(stack, arg.args[i]) push!(stack, ", ") end - push!(stack, arg[1]) + push!(stack, arg.args[1]) end else print(io, arg) @@ -175,22 +172,22 @@ function function_string(::MIME"text/latex", x::GenericNonlinearExpr) while !isempty(stack) arg = pop!(stack) if arg isa GenericNonlinearExpr - if arg.head in _PREFIX_OPERATORS && length(arg) > 1 + if arg.head in _PREFIX_OPERATORS && length(arg.args) > 1 print(io, "\\left({") push!(stack, "}\\right)") - for i in length(arg):-1:2 - push!(stack, arg[i]) + for i in length(arg.args):-1:2 + push!(stack, arg.args[i]) push!(stack, "} $(arg.head) {") end - push!(stack, arg[1]) + push!(stack, arg.args[1]) else print(io, "\\textsf{", arg.head, "}\\left({") push!(stack, "}\\right)") - for i in length(arg):-1:2 - push!(stack, arg[i]) + for i in length(arg.args):-1:2 + push!(stack, arg.args[i]) push!(stack, "}, {") end - push!(stack, arg[1]) + push!(stack, arg.args[1]) end else print(io, arg) @@ -205,8 +202,8 @@ _isequal(x::T, y::T) where {T<:AbstractJuMPScalar} = isequal_canonical(x, y) function isequal_canonical(x::GenericNonlinearExpr, y::GenericNonlinearExpr) return x.head == y.head && - length(x) == length(y) && - all(i -> _isequal(x[i], y[i]), 1:length(x)) + length(x.args) == length(y.args) && + all(i -> _isequal(x.args[i], y.args[i]), 1:length(x.args)) end function MOI.Nonlinear.parse_expression( @@ -229,9 +226,9 @@ function MOI.Nonlinear.parse_expression( return end -function _get_node_type(data, x) +function _get_node_type(data, x::GenericNonlinearExpr) id = get(data.operators.univariate_operator_to_id, x.head, nothing) - if length(x) == 1 && id !== nothing + if length(x.args) == 1 && id !== nothing return id, MOI.Nonlinear.NODE_CALL_UNIVARIATE end id = get(data.operators.multivariate_operator_to_id, x.head, nothing) @@ -249,12 +246,19 @@ function _get_node_type(data, x) return throw(MOI.UnsupportedNonlinearOperator(x.head)) end -function _parse_without_recursion_inner(stack, data, expr, x, parent) +function _parse_without_recursion_inner( + stack, + data, + expr, + x::GenericNonlinearExpr, + parent, +) id, node_type = _get_node_type(data, x) push!(expr.nodes, MOI.Nonlinear.Node(node_type, id, parent)) parent = length(expr.nodes) - for i in length(x):-1:1 # Args need to be pushed onto the stack in reverse - push!(stack, (parent, x[i])) + # Args need to be pushed onto the stack in reverse + for i in length(x.args):-1:1 + push!(stack, (parent, x.args[i])) end return end @@ -869,3 +873,34 @@ macro register(model, op, args...) ) return Expr(:(=), esc(op), rhs) end + +function jump_function_type( + ::GenericModel{T}, + ::Type{MOI.VectorNonlinearFunction}, +) where {T} + return Vector{GenericNonlinearExpr{GenericVariableRef{T}}} +end + +function jump_function( + model::GenericModel{T}, + f::MOI.VectorNonlinearFunction, +) where {T} + return GenericNonlinearExpr{GenericVariableRef{T}}[ + jump_function(model, fi) for fi in MOI.Utilities.eachscalar(f) + ] +end + +# We use `AbstractJuMPScalar` as a catch-all fallback for any mix of JuMP +# scalars that have not been dispatched by some other method. 
+ +function moi_function_type(::Type{<:Vector{<:AbstractJuMPScalar}}) + return MOI.VectorNonlinearFunction +end + +function moi_function(f::Vector{<:AbstractJuMPScalar}) + return MOI.VectorNonlinearFunction(f) +end + +function MOI.VectorNonlinearFunction(f::Vector{<:AbstractJuMPScalar}) + return MOI.VectorNonlinearFunction(map(moi_function, f)) +end diff --git a/src/optimizer_interface.jl b/src/optimizer_interface.jl index 80f54bf0e17..89b3f40c2ea 100644 --- a/src/optimizer_interface.jl +++ b/src/optimizer_interface.jl @@ -418,8 +418,9 @@ function optimize!( if _uses_new_nonlinear_interface(model) error( "Cannot optimize a model which contains the features from " * - "both the legacy and new nonlinear interfaces. You must use " * - "one or the other.", + "both the legacy (macros beginning with `@NL`) and new " * + "(`NonlinearExpr`) nonlinear interfaces. You must use one or " * + "the other.", ) end evaluator = MOI.Nonlinear.Evaluator( diff --git a/test/test_nlp_expr.jl b/test/test_nlp_expr.jl index 924a0bc1eb1..3af788cd72d 100644 --- a/test/test_nlp_expr.jl +++ b/test/test_nlp_expr.jl @@ -656,8 +656,9 @@ function test_error_both_nl_interfaces() @NLconstraint(model, log(x) <= 1) @test_throws( ErrorException( - "Cannot optimize a model which contains the features from both " * - "the legacy and new nonlinear interfaces. You must use one or " * + "Cannot optimize a model which contains the features from " * + "both the legacy (macros beginning with `@NL`) and new " * + "(`NonlinearExpr`) nonlinear interfaces. You must use one or " * "the other.", ), optimize!(model), @@ -665,4 +666,55 @@ function test_error_both_nl_interfaces() return end +function test_VectorNonlinearFunction_moi_function() + model = Model() + @variable(model, x) + F = [sin(x)] + @test moi_function_type(typeof(F)) == MOI.VectorNonlinearFunction + @test isapprox( + moi_function(F), + MOI.VectorNonlinearFunction([ + MOI.ScalarNonlinearFunction(:sin, Any[index(x)]), + ]), + ) + @test MOI.VectorNonlinearFunction(F) ≈ moi_function(F) + @test jump_function_type(model, MOI.VectorNonlinearFunction) == + Vector{NonlinearExpr} + @test isequal_canonical(jump_function(model, moi_function(F)), F) + return +end + +function test_VectorNonlinearFunction_moi_function_AbstractJuMPScalar() + model = Model() + @variable(model, x) + F = [sin(x), x] + @test F isa Vector{AbstractJuMPScalar} + @test moi_function_type(typeof(F)) == MOI.VectorNonlinearFunction + @test isapprox( + moi_function(F), + MOI.VectorNonlinearFunction([ + MOI.ScalarNonlinearFunction(:sin, Any[index(x)]), + MOI.ScalarNonlinearFunction(:+, Any[index(x)]), + ]), + ) + @test MOI.VectorNonlinearFunction(F) ≈ moi_function(F) + @test jump_function_type(model, MOI.VectorNonlinearFunction) == + Vector{NonlinearExpr} + @test isequal_canonical( + jump_function(model, moi_function(F)), + [sin(x), NonlinearExpr(:+, x)], + ) + return +end + +function test_VectorNonlinearFunction_objective() + model = Model() + @variable(model, x) + F = [sin(x), sqrt(x)] + @objective(model, Min, F) + @test objective_function_type(model) == Vector{NonlinearExpr} + @test isequal_canonical(objective_function(model), F) + return +end + end # module From f3b5a99cc38b6be342c223a2183a6428a879ebf9 Mon Sep 17 00:00:00 2001 From: odow Date: Thu, 17 Aug 2023 09:37:52 +1200 Subject: [PATCH 08/23] Register user-defined functions and store function evaluator Fix unsaved changes Clarify comment in mutable_arithmetics.jl Apply suggestions from code review Update code coverage Fix formatting Update complementarity.jl Make 
logical overloads public Update Update Switch to manual register Fix docs Update docs/src/manual/nonlinear.md Update Update s/flatten/flatten! Fix formatting Error if term is Complex-valued Clarify that NonlinearExpr must be Real valued Update flatten heuristic Standardize udf_ prefix Improve performance of moi_function Improve performance of flatten! --- Project.toml | 2 +- docs/Project.toml | 1 + docs/src/manual/expressions.md | 102 +--- docs/src/manual/nonlinear.md | 276 ++++++++- .../tutorials/applications/power_systems.jl | 4 +- .../tutorials/nonlinear/complementarity.jl | 25 +- .../tutorials/nonlinear/nested_problems.jl | 8 +- .../tutorials/nonlinear/tips_and_tricks.jl | 16 +- .../nonlinear/user_defined_hessians.jl | 4 +- src/macros.jl | 203 ++++++- src/mutable_arithmetics.jl | 34 +- src/nlp_expr.jl | 279 ++++++--- src/variables.jl | 4 +- test/perf/NonlinearBenchmark.jl | 543 ++++++++++++++++++ test/test_nlp_expr.jl | 86 ++- 15 files changed, 1323 insertions(+), 264 deletions(-) create mode 100644 test/perf/NonlinearBenchmark.jl diff --git a/Project.toml b/Project.toml index 0a440675a7b..f4f7ba1ede1 100644 --- a/Project.toml +++ b/Project.toml @@ -22,7 +22,7 @@ JuMPDimensionalDataExt = "DimensionalData" [compat] DimensionalData = "0.24" MacroTools = "0.5" -MathOptInterface = "1.18" +MathOptInterface = "1.19" MutableArithmetics = "1.1" OrderedCollections = "1" SnoopPrecompile = "1" diff --git a/docs/Project.toml b/docs/Project.toml index f12a5c0a32b..99d8107c20b 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -26,6 +26,7 @@ Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" SCS = "c946c3f1-0d1f-5ce8-9dea-7daa1f7e2d13" SQLite = "0aa819cd-b072-5ff4-a722-6bc24af294d9" +SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b" StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" TOML = "fa267f1f-6049-4f14-aa54-33bafae1ed76" Tables = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" diff --git a/docs/src/manual/expressions.md b/docs/src/manual/expressions.md index 2aed59d2ce6..59944f0c24a 100644 --- a/docs/src/manual/expressions.md +++ b/docs/src/manual/expressions.md @@ -327,107 +327,7 @@ julia> coefficient(ex, x) ## Nonlinear expressions Nonlinear expressions in JuMP are represented by a [`NonlinearExpr`](@ref) -object. - -### Constructors - -Nonlinear expressions can be created using the [`NonlinearExpr`](@ref) -constructors: - -```jldoctest nonlinear_expressions -julia> model = Model(); - -julia> @variable(model, x); - -julia> expr = NonlinearExpr(:sin, Any[x]) -sin(x) -``` - -or via operator overloading: - -```jldoctest -julia> model = Model(); - -julia> @variable(model, x); - -julia> expr = sin(x) -sin(x) -``` - -### Fields - -Each [`NonlinearExpr`](@ref) has two fields. - -The `.head` field is a `Symbol` that represents the operator being called: - -```jldoctest nonlinear_expressions -julia> expr.head -:sin -``` - -The `.args` field is a `Vector{Any}` containing the arguments to the operator: - -```jldoctest nonlinear_expressions -julia> expr.args -1-element Vector{Any}: - x -``` - -### Supported arguments - -Nonlinear expressions can contain a mix of numbers, [`AffExpr`](@ref), -[`QuadExpr`](@ref), and other [`NonlinearExpr`](@ref): - -```jldoctest -julia> model = Model(); - -julia> @variable(model, x); - -julia> aff = x + 1; - -julia> quad = x^2 + x; - -julia> expr = cos(x) * sin(quad) + aff -(cos(x) * sin(x² + x)) + (x + 1) -``` - -### Limitations - -Some nonlinear expressions cannot be created via operator overloading. 
For -example, to minimize the likelihood of bugs in user-code, we have not overloaded -comparisons such as `<` and `>=` between JuMP objects: - -```jldoctest -julia> model = Model(); - -julia> @variable(model, x); - -julia> x < 1 -ERROR: Cannot evaluate `<` between a variable and a number. -[...] -``` - -Instead, wrap the expression in the [`@expression`](@ref) macro: -```jldoctest -julia> model = Model(); - -julia> @variable(model, x); - -julia> expr = @expression(model, x < 1) -x < 1 -``` - -For technical reasons, other operators that are not overloaded include `||`, -`&&`, and `ifelse`. - -```jldoctest -julia> model = Model(); - -julia> @variable(model, x); - -julia> expr = @expression(model, ifelse(x < -1 || x >= 1, x^2, 0.0)) -ifelse((x < -1) || (x >= 1), x², 0.0) -``` +object. See [Nonlinear expressions in detail](@ref) for more details. ## Initializing arrays diff --git a/docs/src/manual/nonlinear.md b/docs/src/manual/nonlinear.md index 36cddccd8c9..bf361692761 100644 --- a/docs/src/manual/nonlinear.md +++ b/docs/src/manual/nonlinear.md @@ -111,6 +111,203 @@ julia> sin(sin(1.0)) 0.7456241416655579 ``` +## Nonlinear expressions in detail + +Nonlinear expressions in JuMP are represented by a [`NonlinearExpr`](@ref) +object. + +### Constructors + +Nonlinear expressions can be created using the [`NonlinearExpr`](@ref) +constructors: + +```jldoctest nonlinear_expressions +julia> model = Model(); + +julia> @variable(model, x); + +julia> expr = NonlinearExpr(:sin, Any[x]) +sin(x) +``` + +or via operator overloading: + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> expr = sin(x) +sin(x) +``` + +### Supported arguments + +Nonlinear expressions can contain a mix of numbers, [`AffExpr`](@ref), +[`QuadExpr`](@ref), and other [`NonlinearExpr`](@ref): + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> aff = x + 1; + +julia> quad = x^2 + x; + +julia> expr = cos(x) * sin(quad) + aff +(cos(x) * sin(x² + x)) + (x + 1) +``` + +### Supported operators + +The list of supported operators may vary between solvers. Given an optimizer, +query the list of supported operators using +[`MOI.ListOfSupportedNonlinearOperators`](@ref): +```jldoctest; filter=[r":.+", r"[0-9]+\-element"] +julia> import Ipopt + +julia> import MathOptInterface as MOI + +julia> MOI.get(Ipopt.Optimizer(), MOI.ListOfSupportedNonlinearOperators()) +85-element Vector{Symbol}: + :+ + :- + :abs + :sqrt + :cbrt + :abs2 + :inv + :log + :log10 + :log2 + ⋮ + :min + :max + :&& + :|| + :<= + :(==) + :>= + :< + :> +``` + +In some univariate cases, the operator is defined in [`SpecialFunctions.jl`](https://github.com/JuliaMath/SpecialFunctions.jl). +To use these functions, you must explicitly import `SpecialFunctions.jl` +```jldoctest +julia> import Ipopt + +julia> op = MOI.get(Ipopt.Optimizer(), MOI.ListOfSupportedNonlinearOperators()); + +julia> :erfcx in op +true + +julia> :dawson in op +true + +julia> import SpecialFunctions + +julia> model = Model(); + +julia> @variable(model, x) +x + +julia> @expression(model, SpecialFunctions.erfcx(x)) +erfcx(x) + +julia> @expression(model, SpecialFunctions.dawson(x)) +dawson(x) +``` + +### Limitations + +Some nonlinear expressions cannot be created via operator overloading. 
For
+example, to minimize the likelihood of bugs in user-code, we have not overloaded
+comparisons such as `<` and `>=` between JuMP objects:
+
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x);
+
+julia> x < 1
+ERROR: Cannot evaluate `<` between a variable and a number.
+[...]
+```
+
+Instead, wrap the expression in the [`@expression`](@ref) macro:
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x);
+
+julia> expr = @expression(model, x < 1)
+x < 1
+```
+
+For technical reasons, other operators that are not overloaded include `||`,
+`&&`, and `ifelse`.
+
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x);
+
+julia> expr = @expression(model, ifelse(x < -1 || x >= 1, x^2, 0.0))
+ifelse((x < -1) || (x >= 1), x², 0.0)
+```
+
+As an alternative, use the `JuMP.nonlinear_` functions, which fall back to the
+various comparison and logical operators:
+```jldoctest
+julia> model = Model();
+
+julia> @variable(model, x);
+
+julia> expr = nonlinear_ifelse(
+           nonlinear_or(
+               nonlinear_less_than(x, -1),
+               nonlinear_greater_equal(x, 1)
+           ),
+           x^2,
+           0.0,
+       )
+ifelse((x < -1) || (x >= 1), x², 0.0)
+```
+
+The available functions are:
+
+| JuMP function | Julia function |
+| :-------------------------------- | :------------- |
+| [`nonlinear_ifelse`](@ref) | `ifelse` |
+| [`nonlinear_and`](@ref) | `&&` |
+| [`nonlinear_or`](@ref) | `\|\|` |
+| [`nonlinear_greater_than`](@ref) | `>` |
+| [`nonlinear_greater_equal`](@ref) | `>=` |
+| [`nonlinear_less_than`](@ref) | `<` |
+| [`nonlinear_less_equal`](@ref) | `<=` |
+| [`nonlinear_equal_to`](@ref) | `==` |
+
+### Fields
+
+Each [`NonlinearExpr`](@ref) has two fields.
+
+The `.head` field is a `Symbol` that represents the operator being called:
+
+```jldoctest nonlinear_expressions
+julia> expr.head
+:sin
+```
+
+The `.args` field is a `Vector{Any}` containing the arguments to the operator:
+
+```jldoctest nonlinear_expressions
+julia> expr.args
+1-element Vector{Any}:
+ x
+```
+
 ## User-defined functions
 
 In addition to a standard list of univariate and multivariate functions
@@ -130,10 +327,10 @@ using JuMP
 square(x) = x^2
 f(x, y) = (x - 1)^2 + (y - 2)^2
 model = Model();
-@register(model, my_square, 1, square)
-@register(model, my_f, 2, f)
+@register(model, udf_square, 1, square)
+@register(model, udf_f, 2, f)
 @variable(model, x[1:2]);
-@objective(model, Min, my_f(x[1], my_square(x[2])))
+@objective(model, Min, udf_f(x[1], udf_square(x[2])))
 ```
 
 The arguments to [`@register`](@ref) are:
@@ -160,10 +357,12 @@ using JuMP
 square(x) = x^2
 f(x, y) = (x - 1)^2 + (y - 2)^2
 model = Model();
-my_square = add_user_defined_function(model, :my_square, 1, square)
-my_f = add_user_defined_function(model, :my_f, 2, f)
+udf_square = add_user_defined_function(model, :udf_square, 1, square)
+model[:udf_square] = udf_square
+udf_f = add_user_defined_function(model, :udf_f, 2, f)
+model[:udf_f] = udf_f
 @variable(model, x[1:2]);
-@objective(model, Min, my_f(x[1], my_square(x[2])))
+@objective(model, Min, udf_f(x[1], udf_square(x[2])))
 ```
 
 This has two important consequences.
@@ -182,20 +381,55 @@ Stacktrace:
 ```
 and `square` already exists as a Julia function.
 
-Second, you can construct and use [`UserDefinedFunction`](@ref)s outside the
-macros.
+Second, you can obtain a reference to the user-defined function using the +`model[:key]` syntax: ```@repl using JuMP square(x) = x^2 model = Model(); -@register(model, my_square, 1, square) -@variable(model, x) -typeof(my_square) -x_squared = my_square(x) -typeof(x_squared) -my_square_2 = UserDefinedFunction(:my_square) -my_square_2(x_squared) +@register(model, udf_square, 1, square) +udf_square_2 = model[:udf_square] +``` + +### Invalid redefinition of constant + +A common error encountered is `invalid redefinition of constant`. This occurs +when the name of the user-defined function is the same as an existing function: +```jldoctest nonlinear_invalid_redefinition +julia> using JuMP + +julia> model = Model(); + +julia> f(x) = x^2 +f (generic function with 1 method) + +julia> @register(model, f, 1, f) +ERROR: invalid redefinition of constant f +[...] +``` + +If you evaluate the function without registering it, JuMP will trace the +function using operator overloading: +```jldoctest nonlinear_invalid_redefinition +julia> @variable(model, x); + +julia> f(x) +x² +``` + +To force JuMP to treat `f` as a user-defined function and not trace it, register +the function using [`add_user_defined_function`](@ref) and define a new method +which manually creates a [`NonlinearExpr`](@ref): +```jldoctest nonlinear_invalid_redefinition +julia> _ = add_user_defined_function(model, :f, 1, f) +UserDefinedFunction{typeof(f)}(:f, f) + +julia> f(x::AbstractJuMPScalar) = NonlinearExpr(:f, Any[x]) +f (generic function with 2 methods) + +julia> @expression(model, log(f(x))) +log(f(x)) ``` ### Register gradients and Hessians @@ -217,9 +451,9 @@ f(x) = x^2 ∇f(x) = 2x ∇²f(x) = 2 model = Model(); -@register(model, my_square, 1, f, ∇f, ∇²f) # Providing ∇²f is optional +@register(model, udf_f, 1, f, ∇f, ∇²f) # Providing ∇²f is optional @variable(model, x) -@objective(model, Min, my_square(x)) +@objective(model, Min, udf_f(x)) ``` #### Multivariate functions @@ -250,7 +484,7 @@ model = Model(); ``` You may assume the Hessian matrix `H` is initialized with zeros, and because `H` -is symmetric, you need only to fill in the non-zero of the lower-triangular +is symmetric, you need only to fill in the non-zero lower-triangular terms. The matrix type passed in as `H` depends on the automatic differentiation system, so make sure the first argument to the Hessian function supports an `AbstractMatrix` (it may be something other than `Matrix{Float64}`). Moreover, @@ -277,8 +511,8 @@ using JuMP model = Model(); @variable(model, x[1:5]) f(x::Vector) = sum(x[i]^i for i in 1:length(x)) -@register(model, my_f, 5, (x...) -> f(collect(x))) -@objective(model, Min, my_f(x...)) +@register(model, udf_f, 5, (x...) -> f(collect(x))) +@objective(model, Min, udf_f(x...)) ``` ### Automatic differentiation @@ -292,7 +526,7 @@ differentiation of user-defined functions. computed derivatives are not subject to approximation error. JuMP uses [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl) to -perform automatic differentiation; see the ForwardDiff.jl +perform automatic differentiation of user-defined functions; see the ForwardDiff.jl [documentation](https://www.juliadiff.org/ForwardDiff.jl/v0.10.2/user/limitations.html) for a description of how to write a function suitable for automatic differentiation. 
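As a minimal sketch of the ForwardDiff requirement above (the solver choice and
names here are illustrative): the function must accept generic `Real`
arguments, because ForwardDiff evaluates it with dual-number types rather than
`Float64`:

```julia
using JuMP
import Ipopt

bad_f(x::Float64) = x^2  # fails under automatic differentiation: duals are not Float64
good_f(x::Real) = x^2    # a generic signature accepts ForwardDiff's dual numbers

model = Model(Ipopt.Optimizer)
@variable(model, x >= 1)
# No gradient or Hessian is supplied, so JuMP falls back to ForwardDiff:
@register(model, udf_good_f, 1, good_f)
@objective(model, Min, udf_good_f(x))
optimize!(model)
```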
diff --git a/docs/src/tutorials/applications/power_systems.jl b/docs/src/tutorials/applications/power_systems.jl index 86073fb59b1..28b75c37d32 100644 --- a/docs/src/tutorials/applications/power_systems.jl +++ b/docs/src/tutorials/applications/power_systems.jl @@ -513,14 +513,14 @@ function solve_nonlinear_economic_dispatch( if silent set_silent(model) end - @register(model, tcf, 1, thermal_cost_function) + @register(model, udf_tcf, 1, thermal_cost_function) N = length(generators) @variable(model, generators[i].min <= g[i = 1:N] <= generators[i].max) @variable(model, 0 <= w <= scenario.wind) @objective( model, Min, - sum(generators[i].variable_cost * tcf(g[i]) for i in 1:N) + + sum(generators[i].variable_cost * udf_tcf(g[i]) for i in 1:N) + wind.variable_cost * w, ) @constraint(model, sum(g[i] for i in 1:N) + sqrt(w) == scenario.demand) diff --git a/docs/src/tutorials/nonlinear/complementarity.jl b/docs/src/tutorials/nonlinear/complementarity.jl index 58674491bfd..7495f8fbba6 100644 --- a/docs/src/tutorials/nonlinear/complementarity.jl +++ b/docs/src/tutorials/nonlinear/complementarity.jl @@ -5,8 +5,7 @@ # # Mixed complementarity problems -# This tutorial is a collection of examples of small mixed-complementarity -# programs. +# This tutorial is a collection of mixed complementarity programs. # This tutorial uses the following packages: @@ -14,6 +13,28 @@ using JuMP import PATHSolver import Test #src +# ## Background + +# A mixed complementarity problem has the form: +# ```math +# \begin{align} +# \;\;\text{s.t.} & F_i(x) \perp x_i & i = 1 \ldots n \\ +# & l_i \le x_i \le u_i & i = 1 \ldots n. +# \end{align} +# ``` +# where the ``\perp`` constraint enforces the following relations: +# +# - If ``l_i < x_i < u_i``, then ``F_i(x) == 0`` +# - If ``l_i == x_i``, then ``F_i(x) \ge 0`` +# - If ``x_i == u_i``, then ``F_i(x) \le 0`` + +# You may have seen a complementarity problem written as +# ``0 \le F(x) \perp x \ge 0``. This is a special case of a mixed +# complementarity problem in which ``l_i = 0`` and ``u_i = \infty``. + +# Importantly, a mixed complementarity problem does not have an objective, and +# no other constraint types are present. + # ## Linear complementarity # Form a mixed complementarity problem using the perp symbol `⟂` (type diff --git a/docs/src/tutorials/nonlinear/nested_problems.jl b/docs/src/tutorials/nonlinear/nested_problems.jl index dc6e420a16b..421a0b19039 100644 --- a/docs/src/tutorials/nonlinear/nested_problems.jl +++ b/docs/src/tutorials/nonlinear/nested_problems.jl @@ -141,8 +141,8 @@ end model = Model(Ipopt.Optimizer) @variable(model, x[1:2] >= 0) -@register(model, f_V, 2, V, ∇V, ∇²V) -@objective(model, Min, x[1]^2 + x[2]^2 + f_V(x[1], x[2])) +@register(model, udf_V, 2, V, ∇V, ∇²V) +@objective(model, Min, x[1]^2 + x[2]^2 + udf_V(x[1], x[2])) optimize!(model) solution_summary(model) @@ -215,13 +215,13 @@ model = Model(Ipopt.Optimizer) cache = Cache(Float64[], NaN, Float64[]) @register( model, - f_V, + udf_V, 2, (x...) -> cached_f(cache, x...), (g, x...) -> cached_∇f(cache, g, x...), (H, x...) 
-> cached_∇²f(cache, H, x...), ) -@objective(model, Min, x[1]^2 + x[2]^2 + f_V(x[1], x[2])) +@objective(model, Min, x[1]^2 + x[2]^2 + udf_V(x[1], x[2])) optimize!(model) solution_summary(model) diff --git a/docs/src/tutorials/nonlinear/tips_and_tricks.jl b/docs/src/tutorials/nonlinear/tips_and_tricks.jl index d3e6812a283..a3daf79b270 100644 --- a/docs/src/tutorials/nonlinear/tips_and_tricks.jl +++ b/docs/src/tutorials/nonlinear/tips_and_tricks.jl @@ -46,10 +46,10 @@ foo_2(x, y) = foo(x, y)[2] model = Model(Ipopt.Optimizer) set_silent(model) @variable(model, x[1:2] >= 0, start = 0.1) -@register(model, f_foo_1, 2, foo_1) -@register(model, f_foo_2, 2, foo_2) -@objective(model, Max, f_foo_1(x[1], x[2])) -@constraint(model, f_foo_2(x[1], x[2]) <= 2) +@register(model, udf_foo_1, 2, foo_1) +@register(model, udf_foo_2, 2, foo_2) +@objective(model, Max, udf_foo_1(x[1], x[2])) +@constraint(model, udf_foo_2(x[1], x[2]) <= 2) function_calls = 0 optimize!(model) Test.@test objective_value(model) ≈ √3 atol = 1e-4 @@ -114,10 +114,10 @@ println("function_calls = ", function_calls) model = Model(Ipopt.Optimizer) set_silent(model) @variable(model, x[1:2] >= 0, start = 0.1) -@register(model, f_foo_1, 2, memoized_foo[1]) -@register(model, f_foo_2, 2, memoized_foo[2]) -@objective(model, Max, f_foo_1(x[1], x[2])) -@constraint(model, f_foo_2(x[1], x[2]) <= 2) +@register(model, udf_foo_1, 2, memoized_foo[1]) +@register(model, udf_foo_2, 2, memoized_foo[2]) +@objective(model, Max, udf_foo_1(x[1], x[2])) +@constraint(model, udf_foo_2(x[1], x[2]) <= 2) function_calls = 0 optimize!(model) Test.@test objective_value(model) ≈ √3 atol = 1e-4 diff --git a/docs/src/tutorials/nonlinear/user_defined_hessians.jl b/docs/src/tutorials/nonlinear/user_defined_hessians.jl index 12f64254a4f..75aa67ce8dc 100644 --- a/docs/src/tutorials/nonlinear/user_defined_hessians.jl +++ b/docs/src/tutorials/nonlinear/user_defined_hessians.jl @@ -69,7 +69,7 @@ end model = Model(Ipopt.Optimizer) @variable(model, x[1:2]) -@register(model, f_rosenbrock, 2, rosenbrock, ∇rosenbrock, ∇²rosenbrock) -@objective(model, Min, f_rosenbrock(x[1], x[2])) +@register(model, udf_rosenbrock, 2, rosenbrock, ∇rosenbrock, ∇²rosenbrock) +@objective(model, Min, udf_rosenbrock(x[1], x[2])) optimize!(model) solution_summary(model; verbose = true) diff --git a/src/macros.jl b/src/macros.jl index f331f88b5a3..92689525c77 100644 --- a/src/macros.jl +++ b/src/macros.jl @@ -474,40 +474,207 @@ function parse_constraint_head( return is_vectorized, parse_code, build_call end -_ifelse(a, x, y) = ifelse(a, x, y) -_and(x, y) = x && y -_or(x, y) = x || y -_less_than(x, y) = x < y -_greater_than(x, y) = x > y -_less_equal(x, y) = x <= y -_greater_equal(x, y) = x >= y -_equal_to(x, y) = x == y +""" + nonlinear_ifelse(a, x, y) + +A function that falls back to `ifelse(a, x, y)`, but when called with JuMP +variables or expressions, returns a [`GenericNonlinearExpr`](@ref). + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> nonlinear_ifelse(true, 1.0, 2.0) +1.0 + +julia> nonlinear_ifelse(x, 1.0, 2.0) +ifelse(x, 1.0, 2.0) +``` +""" +nonlinear_ifelse(a, x, y) = ifelse(a, x, y) + +""" + nonlinear_and(x, y) + +A function that falls back to `x && y`, but when called with JuMP variables or +expressions, returns a [`GenericNonlinearExpr`](@ref). 
+ +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> nonlinear_and(true, false) +false + +julia> nonlinear_and(true, x) +true && x +``` +""" +nonlinear_and(x, y) = x && y + +""" + nonlinear_or(x, y) + +A function that falls back to `x || y`, but when called with JuMP variables or +expressions, returns a [`GenericNonlinearExpr`](@ref). + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> nonlinear_or(true, false) +true + +julia> nonlinear_or(true, x) +true || x +``` +""" +nonlinear_or(x, y) = x || y + +""" + nonlinear_less_than(x, y) + +A function that falls back to `x < y`, but when called with JuMP variables or +expressions, returns a [`GenericNonlinearExpr`](@ref). + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> nonlinear_less_than(1, 2) +true + +julia> nonlinear_less_than(x, 2) +x < 2 +``` +""" +nonlinear_less_than(x, y) = x < y + +""" + nonlinear_greater_than(x, y) + +A function that falls back to `x > y`, but when called with JuMP variables or +expressions, returns a [`GenericNonlinearExpr`](@ref). + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> nonlinear_greater_than(1, 2) +false + +julia> nonlinear_greater_than(x, 2) +x > 2 +``` +""" +nonlinear_greater_than(x, y) = x > y + +""" + nonlinear_less_equal(x, y) + +A function that falls back to `x <= y`, but when called with JuMP variables or +expressions, returns a [`GenericNonlinearExpr`](@ref). + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> nonlinear_less_equal(2, 2) +true + +julia> nonlinear_less_equal(x, 2) +x <= 2 +``` +""" +nonlinear_less_equal(x, y) = x <= y + +""" + nonlinear_greater_equal(x, y) + +A function that falls back to `x >= y`, but when called with JuMP variables or +expressions, returns a [`GenericNonlinearExpr`](@ref). + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> nonlinear_greater_equal(2, 2) +true + +julia> nonlinear_greater_equal(x, 2) +x >= 2 +``` +""" +nonlinear_greater_equal(x, y) = x >= y + +""" + nonlinear_equal_to(x, y) + +A function that falls back to `x == y`, but when called with JuMP variables or +expressions, returns a [`GenericNonlinearExpr`](@ref). + +## Example + +```jldoctest +julia> model = Model(); + +julia> @variable(model, x); + +julia> nonlinear_equal_to(2, 2) +true + +julia> nonlinear_equal_to(x, 2) +x == 2 +``` +""" +nonlinear_equal_to(x, y) = x == y function _rewrite_to_jump_logic(x) if Meta.isexpr(x, :call) op = if x.args[1] == :ifelse - return Expr(:call, _ifelse, x.args[2:end]...) + return Expr(:call, nonlinear_ifelse, x.args[2:end]...) elseif x.args[1] == :< - return Expr(:call, _less_than, x.args[2:end]...) + return Expr(:call, nonlinear_less_than, x.args[2:end]...) elseif x.args[1] == :> - return Expr(:call, _greater_than, x.args[2:end]...) + return Expr(:call, nonlinear_greater_than, x.args[2:end]...) elseif x.args[1] == :<= - return Expr(:call, _less_equal, x.args[2:end]...) + return Expr(:call, nonlinear_less_equal, x.args[2:end]...) elseif x.args[1] == :>= - return Expr(:call, _greater_equal, x.args[2:end]...) + return Expr(:call, nonlinear_greater_equal, x.args[2:end]...) elseif x.args[1] == :(==) - return Expr(:call, _equal_to, x.args[2:end]...) + return Expr(:call, nonlinear_equal_to, x.args[2:end]...) end elseif Meta.isexpr(x, :||) - return Expr(:call, _or, x.args...) 
+ return Expr(:call, nonlinear_or, x.args...) elseif Meta.isexpr(x, :&&) - return Expr(:call, _and, x.args...) + return Expr(:call, nonlinear_and, x.args...) elseif Meta.isexpr(x, :comparison) lhs = Expr(:call, x.args[2], x.args[1], x.args[3]) rhs = Expr(:call, x.args[4], x.args[3], x.args[5]) return Expr( :call, - _and, + nonlinear_and, _rewrite_to_jump_logic(lhs), _rewrite_to_jump_logic(rhs), ) @@ -528,7 +695,7 @@ function _rewrite_expression(expr) ret = gensym() code = quote $parse_aff - $ret = $flatten($new_aff) + $ret = $flatten!($new_aff) end return ret, code end diff --git a/src/mutable_arithmetics.jl b/src/mutable_arithmetics.jl index 3e39665afcc..d7b77506520 100644 --- a/src/mutable_arithmetics.jl +++ b/src/mutable_arithmetics.jl @@ -286,11 +286,12 @@ end function _MA.add_mul(lhs::AbstractJuMPScalar, x::_Scalar, y::_Scalar) T = _MA.promote_operation(_MA.add_mul, typeof(lhs), typeof(x), typeof(y)) expr = _MA.operate(convert, T, lhs) - # We can't use `operate!!` here because that will cause a StackOverflow. - if _MA.mutability(T) == _MA.IsMutable() - return _MA.operate!(_MA.add_mul, expr, x, y) + # We can't use `operate!!` here because in the IsNotMutable case (e.g., + # NonlinearExpr), it will fallback to this method and cause a StackOverflow. + if _MA.mutability(T) == _MA.IsNotMutable() + return expr + _MA.operate(*, x, y) end - return expr + _MA.operate(*, x, y) + return _MA.operate!(_MA.add_mul, expr, x, y) end function _MA.add_mul( @@ -307,20 +308,23 @@ function _MA.add_mul( typeof.(args)..., ) expr = _MA.operate(convert, T, lhs) - # We can't use `operate!!` here because that will cause a StackOverflow. - if _MA.mutability(T) == _MA.IsMutable() - return _MA.operate!(_MA.add_mul, expr, x, y, args...) + # We can't use `operate!!` here because in the IsNotMutable case (e.g., + # NonlinearExpr), it will fallback to this method and cause a StackOverflow. + if _MA.mutability(T) == _MA.IsNotMutable() + return expr + _MA.operate(*, x, y, args...) end - return expr + _MA.operate(*, x, y, args...) + return _MA.operate!(_MA.add_mul, expr, x, y, args...) end function _MA.sub_mul(lhs::AbstractJuMPScalar, x::_Scalar, y::_Scalar) T = _MA.promote_operation(_MA.sub_mul, typeof(lhs), typeof(x), typeof(y)) expr = _MA.operate(convert, T, lhs) - if _MA.mutability(T) == _MA.IsMutable() - return _MA.operate!(_MA.sub_mul, expr, x, y) + # We can't use `operate!!` here because in the IsNotMutable case (e.g., + # NonlinearExpr), it will fallback to this method and cause a StackOverflow. + if _MA.mutability(T) == _MA.IsNotMutable() + return expr - _MA.operate(*, x, y) end - return expr - _MA.operate(*, x, y) + return _MA.operate!(_MA.sub_mul, expr, x, y) end function _MA.sub_mul( @@ -337,8 +341,10 @@ function _MA.sub_mul( typeof.(args)..., ) expr = _MA.operate(convert, T, lhs) - if _MA.mutability(T) == _MA.IsMutable() - return _MA.operate!(_MA.sub_mul, expr, x, y, args...) + # We can't use `operate!!` here because in the IsNotMutable case (e.g., + # NonlinearExpr), it will fallback to this method and cause a StackOverflow. + if _MA.mutability(T) == _MA.IsNotMutable() + return expr - _MA.operate(*, x, y, args...) end - return expr - _MA.operate(*, x, y, args...) + return _MA.operate!(_MA.sub_mul, expr, x, y, args...) end diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl index baee9fca348..38987667972 100644 --- a/src/nlp_expr.jl +++ b/src/nlp_expr.jl @@ -40,12 +40,14 @@ multiple elements. 
Given a subtype of [`AbstractVariableRef`](@ref), `V`, for `GenericNonlinearExpr{V}`, each element must be one of the following: - * A constant value of type `<:Number` + * A constant value of type `<:Real` * A `V` - * A [`GenericAffExpr{C,V}`](@ref) - * A [`GenericQuadExpr{C,V}`](@ref) + * A [`GenericAffExpr{T,V}`](@ref) + * A [`GenericQuadExpr{T,V}`](@ref) * A [`GenericNonlinearExpr{V}`](@ref) +where `T<:Real` and `T == value_type(V)`. + ## Unsupported operators If the optimizer does not support `head`, an [`MOI.UnsupportedNonlinearOperator`](@ref) @@ -157,7 +159,9 @@ function function_string(::MIME"text/plain", x::GenericNonlinearExpr) push!(stack, arg.args[i]) push!(stack, ", ") end - push!(stack, arg.args[1]) + if length(arg.args) >= 1 + push!(stack, arg.args[1]) + end end else print(io, arg) @@ -173,11 +177,21 @@ function function_string(::MIME"text/latex", x::GenericNonlinearExpr) arg = pop!(stack) if arg isa GenericNonlinearExpr if arg.head in _PREFIX_OPERATORS && length(arg.args) > 1 - print(io, "\\left({") - push!(stack, "}\\right)") + if _needs_parentheses(arg.args[1]) + print(io, "\\left({") + end + if _needs_parentheses(arg.args[end]) + push!(stack, "}\\right)") + end for i in length(arg.args):-1:2 push!(stack, arg.args[i]) + if _needs_parentheses(arg.args[i]) + push!(stack, "\\left({") + end push!(stack, "} $(arg.head) {") + if _needs_parentheses(arg.args[i-1]) + push!(stack, "}\\right)") + end end push!(stack, arg.args[1]) else @@ -187,7 +201,9 @@ function function_string(::MIME"text/latex", x::GenericNonlinearExpr) push!(stack, arg.args[i]) push!(stack, "}, {") end - push!(stack, arg.args[1]) + if length(arg.args) >= 1 + push!(stack, arg.args[1]) + end end else print(io, arg) @@ -275,6 +291,25 @@ end # Univariate operators +_is_real(::Any) = false +_is_real(::Real) = true +_is_real(::AbstractVariableRef) = true +_is_real(::GenericAffExpr{<:Real}) = true +_is_real(::GenericQuadExpr{<:Real}) = true +_is_real(::GenericNonlinearExpr) = true +_is_real(::NonlinearExpression) = true +_is_real(::NonlinearParameter) = true + +function _throw_if_not_real(x) + if !_is_real(x) + error( + "Cannot build `GenericNonlinearExpr` because a term is " * + "complex-valued: `($x)::$(typeof(x))`", + ) + end + return +end + for f in MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS op = Meta.quot(f) if f == :+ @@ -285,6 +320,7 @@ for f in MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS end elseif isdefined(Base, f) @eval function Base.$(f)(x::AbstractJuMPScalar) + _throw_if_not_real(x) return GenericNonlinearExpr{variable_ref_type(x)}($op, x) end elseif isdefined(MOI.Nonlinear, :SpecialFunctions) @@ -292,6 +328,7 @@ for f in MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS SF = MOI.Nonlinear.SpecialFunctions if isdefined(SF, f) @eval function $(SF).$(f)(x::AbstractJuMPScalar) + _throw_if_not_real(x) return GenericNonlinearExpr{variable_ref_type(x)}($op, x) end end @@ -310,14 +347,20 @@ for f in (:+, :-, :*, :^, :/, :atan) op = Meta.quot(f) @eval begin function Base.$(f)(x::AbstractJuMPScalar, y::_Constant) + _throw_if_not_real(x) + _throw_if_not_real(y) rhs = convert(Float64, _constant_to_number(y)) return GenericNonlinearExpr{variable_ref_type(x)}($op, x, rhs) end function Base.$(f)(x::_Constant, y::AbstractJuMPScalar) + _throw_if_not_real(x) + _throw_if_not_real(y) lhs = convert(Float64, _constant_to_number(x)) return GenericNonlinearExpr{variable_ref_type(y)}($op, lhs, y) end function Base.$(f)(x::AbstractJuMPScalar, y::AbstractJuMPScalar) + _throw_if_not_real(x) + _throw_if_not_real(y) return 
GenericNonlinearExpr{variable_ref_type(x)}($op, x, y)
         end
     end
 end
@@ -328,6 +371,7 @@ function _MA.operate!!(
     x::GenericNonlinearExpr,
     y::AbstractJuMPScalar,
 )
+    _throw_if_not_real(x)
     if x.head == :+
         push!(x.args, y)
         return x
@@ -336,10 +380,10 @@ end
 
 """
-    flatten(expr::GenericNonlinearExpr)
+    flatten!(expr::GenericNonlinearExpr)
 
-Flatten a nonlinear expression by lifting nested `+` and `*` nodes into a single
-n-ary operation.
+Flatten a nonlinear expression in-place by lifting nested `+` and `*` nodes into
+a single n-ary operation.
 
 ## Motivation
 
@@ -358,54 +402,86 @@ x
 julia> y = prod(x for i in 1:4)
 ((x²) * x) * x
 
-julia> flatten(y)
+julia> flatten!(y)
 (x²) * x * x
 
-julia> flatten(sin(y))
+julia> flatten!(sin(prod(x for i in 1:4)))
 sin((x²) * x * x)
 ```
 """
-function flatten(expr::GenericNonlinearExpr{V}) where {V}
-    root = GenericNonlinearExpr{V}(expr.head, Any[])
-    nodes_to_visit = Any[(root, arg) for arg in reverse(expr.args)]
-    while !isempty(nodes_to_visit)
-        parent, arg = pop!(nodes_to_visit)
-        if !(arg isa GenericNonlinearExpr)
-            # Not a nonlinear expression, so can use recursion.
-            push!(parent.args, flatten(arg))
-        elseif parent.head in (:+, :*) && arg.head == parent.head
-            # A special case: the arg can be lifted to an n-ary argument of the
-            # parent.
-            for n in reverse(arg.args)
-                push!(nodes_to_visit, (parent, n))
+function flatten!(expr::GenericNonlinearExpr{V}) where {V}
+    if !any(Base.Fix1(_needs_flatten, expr), expr.args)
+        return expr
+    end
+    stack = Tuple{GenericNonlinearExpr{V},Int,GenericNonlinearExpr{V}}[]
+    for i in length(expr.args):-1:1
+        if _needs_flatten(expr, expr.args[i])
+            push!(stack, (expr, i, expr.args[i]))
+        end
+    end
+    while !isempty(stack)
+        parent, i, arg = pop!(stack)
+        if parent.head in (:+, :*) && arg.head == parent.head
+            n = length(parent.args)
+            resize!(parent.args, n + length(arg.args) - 1)
+            for j in length(arg.args):-1:1
+                parent_index = j == 1 ? i : n + j - 1
+                if _needs_flatten(parent, arg.args[j])
+                    push!(stack, (parent, parent_index, arg.args[j]))
+                else
+                    parent.args[parent_index] = arg.args[j]
+                end
             end
         else
-            # The default case for nonlinear expressions. Put the args on the
-            # stack, so that we may walk them later.
-            for n in reverse(arg.args)
-                push!(nodes_to_visit, (arg, n))
+            parent.args[i] = arg
+            for j in length(arg.args):-1:1
+                if _needs_flatten(arg, arg.args[j])
+                    push!(stack, (arg, j, arg.args[j]))
+                end
             end
-            empty!(arg.args)
-            push!(parent.args, arg)
         end
     end
-    return root
+    return expr
 end
 
-flatten(expr) = expr
+flatten!(expr) = expr
+
+_is_expr(::Any, ::Any) = false
+_is_expr(x::GenericNonlinearExpr, op::Symbol) = x.head == op
+
+_needs_flatten(::GenericNonlinearExpr, ::Any) = false
+
+function _needs_flatten(parent::GenericNonlinearExpr, arg::GenericNonlinearExpr)
+    if _is_expr(parent, :+)
+        return _is_expr(arg, :+)
+    elseif _is_expr(parent, :*)
+        return _is_expr(arg, :*)
+    else
+        # Heuristic: we decide to flatten if `parent` is not a + or * operator,
+        # but if one level down there are + or * nodes. This lets us flatten
+        # sin(+(x, +(y, z))) => sin(+(x, y, z)), but not a more complicated
+        # expression like log(sin(+(x, +(y, z)))).
+        #
+        # If you have a benchmark that requires modifying this code, consider
+        # instead adding `flatten!(::Any; force::Bool)` that would allow the
+        # user to override this decision and flatten the entire tree.
+ return any(Base.Fix2(_is_expr, :+), arg.args) || + any(Base.Fix2(_is_expr, :*), arg.args) + end +end -function _ifelse(a::AbstractJuMPScalar, x, y) +function nonlinear_ifelse(a::AbstractJuMPScalar, x, y) return GenericNonlinearExpr{variable_ref_type(a)}(:ifelse, Any[a, x, y]) end for (f, op) in ( - :_and => :&&, - :_or => :||, - :_less_than => :(<), - :_greater_than => :(>), - :_less_equal => :(<=), - :_greater_equal => :(>=), - :_equal_to => :(==), + :nonlinear_and => :&&, + :nonlinear_or => :||, + :nonlinear_less_than => :(<), + :nonlinear_greater_than => :(>), + :nonlinear_less_equal => :(<=), + :nonlinear_greater_equal => :(>=), + :nonlinear_equal_to => :(==), ) op = Meta.quot(op) @eval begin @@ -448,24 +524,28 @@ function check_belongs_to_model( return end -function moi_function(f::GenericNonlinearExpr) - ret = MOI.ScalarNonlinearFunction(f.head, Any[]) - stack = Tuple{MOI.ScalarNonlinearFunction,Any}[] - for arg in reverse(f.args) - push!(stack, (ret, arg)) +moi_function(x::Number) = x + +function moi_function(f::GenericNonlinearExpr{V}) where {V} + ret = MOI.ScalarNonlinearFunction(f.head, similar(f.args)) + stack = Tuple{MOI.ScalarNonlinearFunction,Int,GenericNonlinearExpr{V}}[] + for i in length(f.args):-1:1 + if f.args[i] isa GenericNonlinearExpr{V} + push!(stack, (ret, i, f.args[i])) + else + ret.args[i] = moi_function(f.args[i]) + end end while !isempty(stack) - parent, arg = pop!(stack) - if arg isa GenericNonlinearExpr - new_ret = MOI.ScalarNonlinearFunction(arg.head, Any[]) - push!(parent.args, new_ret) - for child in reverse(arg.args) - push!(stack, (new_ret, child)) + parent, i, arg = pop!(stack) + child = MOI.ScalarNonlinearFunction(arg.head, similar(arg.args)) + parent.args[i] = child + for j in length(arg.args):-1:1 + if arg.args[j] isa GenericNonlinearExpr{V} + push!(stack, (child, j, arg.args[j])) + else + child.args[j] = moi_function(arg.args[j]) end - elseif arg isa Number - push!(parent.args, arg) - else - push!(parent.args, moi_function(arg)) end end return ret @@ -709,11 +789,12 @@ function _MA.promote_operation( end """ - UserDefinedFunction(head::Symbol) + UserDefinedFunction(head::Symbol, func::Function) + +A struct representing a user-defined function named `head`. -A struct representing a user-defined function named `head`. This function must -have already been added to the model using [`add_user_defined_function`](@ref) -or [`@register`](@ref). +This function must have already been added to the model using +[`add_user_defined_function`](@ref) or [`@register`](@ref). ## Example @@ -732,22 +813,29 @@ julia> ∇f(x::Float64) = 2 * x julia> ∇²f(x::Float64) = 2.0 ∇²f (generic function with 1 method) -julia> add_user_defined_function(model, :foo, 1, f, ∇f, ∇²f) -UserDefinedFunction(:foo) +julia> @register(model, udf_f, 1, f, ∇f, ∇²f) +UserDefinedFunction{typeof(f)}(:udf_f, f) -julia> bar = UserDefinedFunction(:foo) -UserDefinedFunction(:foo) +julia> bar = UserDefinedFunction(:udf_f, f) +UserDefinedFunction{typeof(f)}(:udf_f, f) julia> @objective(model, Min, bar(x)) -foo(x) +udf_f(x) + +julia> bar(2.0) +4.0 ``` """ -struct UserDefinedFunction +struct UserDefinedFunction{F} head::Symbol + func::F end function (f::UserDefinedFunction)(args...) - return GenericNonlinearExpr(f.head, Any[a for a in args]) + if any(Base.Fix2(isa, AbstractJuMPScalar), args) + return GenericNonlinearExpr(f.head, Any[a for a in args]) + end + return f.func(args...) 
end """ @@ -784,11 +872,14 @@ julia> ∇f(x::Float64) = 2 * x julia> ∇²f(x::Float64) = 2.0 ∇²f (generic function with 1 method) -julia> foo = add_user_defined_function(model, :foo, 1, f, ∇f, ∇²f) -UserDefinedFunction(:foo) +julia> udf_f = add_user_defined_function(model, :udf_f, 1, f, ∇f, ∇²f) +UserDefinedFunction{typeof(f)}(:udf_f, f) -julia> @objective(model, Min, foo(x)) -foo(x) +julia> @objective(model, Min, udf_f(x)) +udf_f(x) + +julia> udf_f(2.0) +4.0 ``` """ function add_user_defined_function( @@ -810,7 +901,7 @@ function add_user_defined_function( # MOI.Nonlinear will automatically check for autodiff and common mistakes # and throw a nice informative error. MOI.set(model, MOI.UserDefinedFunction(op, dim), args) - return UserDefinedFunction(op) + return UserDefinedFunction(op, args[1]) end """ @@ -836,11 +927,20 @@ julia> ∇f(x::Float64) = 2 * x julia> ∇²f(x::Float64) = 2.0 ∇²f (generic function with 1 method) -julia> @register(model, foo, 1, f, ∇f, ∇²f) -UserDefinedFunction(:foo) +julia> @register(model, udf_f, 1, f, ∇f, ∇²f) +UserDefinedFunction{typeof(f)}(:udf_f, f) + +julia> @objective(model, Min, udf_f(x)) +udf_f(x) + +julia> udf_f(2.0) +4.0 + +julia> model[:udf_f] +UserDefinedFunction{typeof(f)}(:udf_f, f) -julia> @objective(model, Min, foo(x)) -foo(x) +julia> model[:udf_f](x) +udf_f(x) ``` ## Non-macro version @@ -849,29 +949,40 @@ This macro is provided as helpful syntax that matches the style of the rest of the JuMP macros. However, you may also create user-defined functions outside the macros using [`add_user_defined_function`](@ref). For example: -```julia +```jldoctest julia> model = Model(); -julia> @register(model, f, 1, x -> x^2) -UserDefinedFunction(:f) +julia> f(x) = x^2 +f (generic function with 1 method) + +julia> @register(model, udf_f, 1, f) +UserDefinedFunction{typeof(f)}(:udf_f, f) ``` is equivalent to -```julia +```jldoctest julia> model = Model(); -julia> f = add_user_defined_function(model, :f, 1, x -> x^2) -UserDefinedFunction(:f) +julia> f(x) = x^2 +f (generic function with 1 method) + +julia> udf_f = model[:udf_f] = add_user_defined_function(model, :udf_f, 1, f) +UserDefinedFunction{typeof(f)}(:udf_f, f) ``` """ macro register(model, op, args...) - rhs = Expr( + code = Expr( :call, add_user_defined_function, esc(model), Meta.quot(op), esc.(args)..., ) - return Expr(:(=), esc(op), rhs) + return _macro_assign_and_return( + code, + gensym(), + op; + model_for_registering = esc(model), + ) end function jump_function_type( diff --git a/src/variables.jl b/src/variables.jl index f6788d37a44..c66feaf0642 100644 --- a/src/variables.jl +++ b/src/variables.jl @@ -2043,9 +2043,9 @@ There are three common mistakes that lead to this. ```julia foo(x) = x $(sym) 1 ? 0 : 1 - x model = Model() - @register(model, my_foo, 1, foo) + @register(model, udf_f, 1, foo) @variable(model, x) - @expression(model, my_foo(x)) + @expression(model, udf_f(x)) ``` 3. 
You tried to create a logical nonlinear expression outside a macro, for
diff --git a/test/perf/NonlinearBenchmark.jl b/test/perf/NonlinearBenchmark.jl
new file mode 100644
index 00000000000..89484dd96ad
--- /dev/null
+++ b/test/perf/NonlinearBenchmark.jl
@@ -0,0 +1,543 @@
+module NonlinearBenchmark
+
+using JuMP
+import BenchmarkTools
+import DataFrames
+import Ipopt
+import Random
+
+function benchmark_group()
+    lookup = Dict("perf_nl_" => "@NL", "perf_nlexpr_" => "NonlinearExpr")
+    suite = BenchmarkTools.BenchmarkGroup()
+    for v in values(lookup)
+        suite[v] = BenchmarkTools.BenchmarkGroup()
+    end
+    for name in names(@__MODULE__; all = true)
+        f = getfield(@__MODULE__, name)
+        for (k, v) in lookup
+            if startswith("$name", k)
+                fname = replace("$name", k => "")
+                suite[v][fname] = BenchmarkTools.@benchmarkable $f()
+                break
+            end
+        end
+    end
+    return suite
+end
+
+function runbenchmarks()
+    suite = benchmark_group()
+    results = BenchmarkTools.run(suite)
+    df_time = build_table(x -> minimum(x).time / 1e9, results)
+    df_memory = build_table(x -> minimum(x).memory / 1024^2, results)
+    @info "minimum(time) [s]"
+    display(df_time)
+    @info "minimum(memory) [MiB]"
+    display(df_memory)
+    return results
+end
+
+function build_table(f, results)
+    tables = map(sort!(collect(keys(results["NonlinearExpr"])))) do b
+        old = f(results["@NL"][b])
+        new = f(results["NonlinearExpr"][b])
+        return (benchmark = b, NL = old, NonlinearExpr = new, ratio = new / old)
+    end
+    return DataFrames.DataFrame(tables)
+end
+
+# sum
+#
+# nlexpr is slower because it builds up the sum via operator overloading,
+# creating a lot of temporary objects. @NL gets to see the full +(args...), so
+# it builds the expression in-place.
+#
+# We could fix this by implementing an n-ary method for +, but that gets
+# difficult with method ambiguities.
+
+function perf_nl_micro_sum()
+    model = Model()
+    @variable(model, x)
+    @NLobjective(model, Min, sum(x^i for i in 1:10_000))
+    return
+end
+
+function perf_nlexpr_micro_sum()
+    model = Model()
+    @variable(model, x)
+    @objective(model, Min, sum(x^i for i in 1:10_000))
+    return
+end
+
+# prod
+#
+# nlexpr is slower because it builds up the product via operator overloading,
+# creating a lot of temporary objects. @NL gets to see the full *(args...), so
+# it builds the expression in-place.
+#
+# We could fix this by implementing an n-ary method for *, but that gets
+# difficult with method ambiguities.
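To make the comments above concrete, here is a small self-contained sketch (a toy stand-in for illustration, not JuMP's actual types) of why chained binary calls build a deep tree of temporaries, while the proposed n-ary method would build a single flat node. Julia's own n-ary `+` fallback left-folds through the binary method, which is exactly the behavior these benchmarks measure:

```julia
# Toy stand-in with the same layout as GenericNonlinearExpr: an operator
# head plus a vector of child arguments.
struct ToyExpr
    head::Symbol
    args::Vector{Any}
end

# The binary overload: every call allocates a fresh two-child node, so a
# left-folded sum over n terms allocates n - 1 nested nodes.
Base.:+(x::ToyExpr, y) = ToyExpr(:+, Any[x, y])

# The hypothetical n-ary method proposed in the comment above: one node
# with n children.
nary_plus(args...) = ToyExpr(:+, Any[args...])

# Depth of the expression tree; leaves (numbers) have depth 0.
depth(x) = x isa ToyExpr ? 1 + maximum(depth, x.args; init = 0) : 0

x = ToyExpr(:x, Any[])
depth(x + 1 + 2 + 3)          # 4, from the chain ((x + 1) + 2) + 3
depth(nary_plus(x, 1, 2, 3))  # 2, from the flat call +(x, 1, 2, 3)
```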
+ +function perf_nl_micro_prod() + model = Model() + @variable(model, x) + @NLobjective(model, Min, prod(x^i for i in 1:10_000)) + return +end + +function perf_nlexpr_micro_prod() + model = Model() + @variable(model, x) + @objective(model, Min, prod(x^i for i in 1:10_000)) + return +end + +# many_constraints + +function perf_nl_micro_many_constraints() + model = Model() + @variable(model, x[1:10_000]) + @NLconstraint(model, [i = 1:10_000], sin(x[i]) <= cos(i)) + return +end + +function perf_nlexpr_micro_many_constraints() + model = Model() + @variable(model, x[1:10_000]) + @constraint(model, [i = 1:10_000], sin(x[i]) <= cos(i)) + return +end + +# value_expr_many_small + +function perf_nl_micro_value_expr_many_small() + model = Model() + @variable(model, x) + @NLexpression(model, expr[i = 1:10_000], x^i) + value.(x -> 2.0, expr) + return +end + +function perf_nlexpr_micro_value_expr_many_small() + model = Model() + @variable(model, x) + @expression(model, expr[i = 1:10_000], x^i) + value.(x -> 2.0, expr) + return +end + +# value_expr_few_large + +function perf_nl_micro_value_expr_few_large() + model = Model() + @variable(model, x) + @NLexpression(model, expr, sum(x^i for i in 1:10_000)) + value(x -> 2.0, expr) + return +end + +function perf_nlexpr_micro_value_expr_few_large() + model = Model() + @variable(model, x) + @expression(model, expr, sum(x^i for i in 1:10_000)) + value(x -> 2.0, expr) + return +end + +# mle + +function perf_nl_model_mle() + Random.seed!(1234) + n = 1_000 + data = randn(n) + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, μ, start = 0.0) + @variable(model, σ >= 0.0, start = 1.0) + @NLobjective( + model, + Max, + n / 2 * log(1 / (2 * π * σ^2)) - + sum((data[i] - μ)^2 for i in 1:n) / (2 * σ^2) + ) + optimize!(model) + return +end + +function perf_nlexpr_model_mle() + Random.seed!(1234) + n = 1_000 + data = randn(n) + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, μ, start = 0.0) + @variable(model, σ >= 0.0, start = 1.0) + @objective( + model, + Max, + n / 2 * log(1 / (2 * π * σ^2)) - + sum((data[i] - μ)^2 for i in 1:n) / (2 * σ^2) + ) + optimize!(model) + return +end + +# clnlbeam + +function perf_nl_model_clnlbeam() + N = 1000 + h = 1 / N + alpha = 350 + model = Model(Ipopt.Optimizer) + set_silent(model) + @variables(model, begin + -1 <= t[1:(N+1)] <= 1 + -0.05 <= x[1:(N+1)] <= 0.05 + u[1:(N+1)] + end) + @NLobjective( + model, + Min, + sum( + 0.5 * h * (u[i+1]^2 + u[i]^2) + + 0.5 * alpha * h * (cos(t[i+1]) + cos(t[i])) for i in 1:N + ), + ) + @NLconstraint( + model, + [i = 1:N], + x[i+1] - x[i] - 0.5 * h * (sin(t[i+1]) + sin(t[i])) == 0, + ) + @constraint( + model, + [i = 1:N], + t[i+1] - t[i] - 0.5 * h * u[i+1] - 0.5 * h * u[i] == 0, + ) + optimize!(model) + return +end + +function perf_nlexpr_model_clnlbeam() + N = 1000 + h = 1 / N + alpha = 350 + model = Model(Ipopt.Optimizer) + set_silent(model) + @variables(model, begin + -1 <= t[1:(N+1)] <= 1 + -0.05 <= x[1:(N+1)] <= 0.05 + u[1:(N+1)] + end) + @objective( + model, + Min, + sum( + 0.5 * h * (u[i+1]^2 + u[i]^2) + + 0.5 * alpha * h * (cos(t[i+1]) + cos(t[i])) for i in 1:N + ), + ) + @constraint( + model, + [i = 1:N], + x[i+1] - x[i] - 0.5 * h * (sin(t[i+1]) + sin(t[i])) == 0, + ) + @constraint( + model, + [i = 1:N], + t[i+1] - t[i] - 0.5 * h * u[i+1] - 0.5 * h * u[i] == 0, + ) + optimize!(model) + return +end + +# rosenbrock + +function perf_nl_model_rosenbrock() + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, x) + @variable(model, y) + 
@NLobjective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2) + optimize!(model) + return +end + +function perf_nlexpr_model_rosenbrock() + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, x) + @variable(model, y) + @objective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2) + optimize!(model) + return +end + +# JuMP#2788 + +function perf_nl_model_jump_2788() + N = 400 + Random.seed!(1234) + k = N + n = 12 + p = rand(400:700, k, 1) + c1 = rand(100:200, k, n) + c2 = 0.9 .* c1 + b = rand(150:250, k, 1) + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, 0 <= x[i = 1:n] <= 1) + @variable(model, 0 <= var1 <= 1) + @variable(model, 0 <= var2 <= 1) + @variable(model, 0 <= var3 <= 1) + @objective(model, Max, var1 - var2 + var3) + @NLexpression(model, expr, sum(x[i] * p[i] for i in 1:n)) + @NLexpression(model, expr_c1[j = 1:k], sum(x[i] * c1[j, i] for i in 1:n)) + @NLexpression(model, expr_c2[j = 1:k], sum(x[i] * c2[j, i] for i in 1:n)) + @NLconstraint(model, expr == sum(b[j] / (1 + var1)^j for j in 1:k)) + @NLconstraint(model, expr == sum(expr_c1[j] / (1 + var2)^j for j in 1:k)) + @NLconstraint(model, expr == sum(expr_c2[j] / (1 + var3)^j for j in 1:k)) + @NLconstraint(model, [j = 1:k], expr_c1[j] >= b[j]) + optimize!(model) + return +end + +function perf_nlexpr_model_jump_2788() + N = 400 + Random.seed!(1234) + k = N + n = 12 + p = rand(400:700, k, 1) + c1 = rand(100:200, k, n) + c2 = 0.9 .* c1 + b = rand(150:250, k, 1) + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, 0 <= x[i = 1:n] <= 1) + @variable(model, 0 <= var1 <= 1) + @variable(model, 0 <= var2 <= 1) + @variable(model, 0 <= var3 <= 1) + @objective(model, Max, var1 - var2 + var3) + @expression(model, expr, sum(x[i] * p[i] for i in 1:n)) + @expression(model, expr_c1[j = 1:k], sum(x[i] * c1[j, i] for i in 1:n)) + @expression(model, expr_c2[j = 1:k], sum(x[i] * c2[j, i] for i in 1:n)) + @constraint(model, expr == sum(b[j] / (1 + var1)^j for j in 1:k)) + @constraint(model, expr == sum(expr_c1[j] / (1 + var2)^j for j in 1:k),) + @constraint(model, expr == sum(expr_c2[j] / (1 + var3)^j for j in 1:k),) + @constraint(model, [j = 1:k], expr_c1[j] >= b[j]) + optimize!(model) + return +end + +# nested_problems + +function perf_nl_model_nested_problems() + function solve_lower_level(x...) + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, y[1:2]) + @NLobjective( + model, + Max, + x[1]^2 * y[1] + x[2]^2 * y[2] - x[1] * y[1]^4 - 2 * x[2] * y[2]^4, + ) + @constraint(model, (y[1] - 10)^2 + (y[2] - 10)^2 <= 25) + optimize!(model) + @assert termination_status(model) == LOCALLY_SOLVED + return objective_value(model), value.(y) + end + function V(x...) + f, _ = solve_lower_level(x...) + return f + end + function ∇V(g::AbstractVector, x...) + _, y = solve_lower_level(x...) + g[1] = 2 * x[1] * y[1] - y[1]^4 + g[2] = 2 * x[2] * y[2] - 2 * y[2]^4 + return + end + function ∇²V(H::AbstractMatrix, x...) + _, y = solve_lower_level(x...) + H[1, 1] = 2 * y[1] + H[2, 2] = 2 * y[2] + return + end + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, x[1:2] >= 0) + register(model, :f_V, 2, V, ∇V, ∇²V) + @NLobjective(model, Min, x[1]^2 + x[2]^2 + f_V(x[1], x[2])) + optimize!(model) + solution_summary(model) + return +end + +function perf_nlexpr_model_nested_problems() + function solve_lower_level(x...) 
+ model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, y[1:2]) + @objective( + model, + Max, + x[1]^2 * y[1] + x[2]^2 * y[2] - x[1] * y[1]^4 - 2 * x[2] * y[2]^4, + ) + @constraint(model, (y[1] - 10)^2 + (y[2] - 10)^2 <= 25) + optimize!(model) + @assert termination_status(model) == LOCALLY_SOLVED + return objective_value(model), value.(y) + end + function V(x...) + f, _ = solve_lower_level(x...) + return f + end + function ∇V(g::AbstractVector, x...) + _, y = solve_lower_level(x...) + g[1] = 2 * x[1] * y[1] - y[1]^4 + g[2] = 2 * x[2] * y[2] - 2 * y[2]^4 + return + end + function ∇²V(H::AbstractMatrix, x...) + _, y = solve_lower_level(x...) + H[1, 1] = 2 * y[1] + H[2, 2] = 2 * y[2] + return + end + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, x[1:2] >= 0) + @register(model, f_V, 2, V, ∇V, ∇²V) + @objective(model, Min, x[1]^2 + x[2]^2 + f_V(x[1], x[2])) + optimize!(model) + solution_summary(model) + return +end + +# ### + +function perf_nl_model_votroto() + Q = -0.8:0.4:0.8 + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, -2 <= p[1:5] <= 2) + @variable(model, -1 <= w <= 3) + @variable(model, -1 <= q <= 3) + @objective(model, Min, w) + total = Dict( + _q => @NLexpression( + model, + sum( + _p / sqrt(2π) * exp(-(i - _q)^2 / 2) for + (i, _p) in enumerate(p) + ) + ) for _q in Any[Q; q; 0.5] + ) + l1 = Dict( + _q => @NLexpression(model, 1 - total[_q] + 0.5 * total[0.5]) for + _q in Any[Q; q] + ) + @NLconstraint( + model, + [_q in Q], + w * (l1[q] - l1[_q]) + (1 - w) * (total[q] - 1) <= 0 + ) + optimize!(model) + return +end + +function perf_nlexpr_model_votroto() + Q = -0.8:0.4:0.8 + model = Model(Ipopt.Optimizer) + set_silent(model) + @variable(model, -2 <= p[1:5] <= 2) + @variable(model, -1 <= w <= 3) + @variable(model, -1 <= q <= 3) + @objective(model, Min, w) + f(p, q) = (1 / sqrt(2π)) * exp(-((p - q)^2) / 2) + total(p, q) = sum(_p * f(i, q) for (i, _p) in enumerate(p)) + l1(p, q) = 1 - total(p, q) + 0.5 * total(p, 0.5) + l2(p, q) = total(p, q) - 1 + lhs(p, q, _q) = l1(p, q) - l1(p, _q) + @constraint(model, [_q in Q], w * lhs(p, q, _q) + (1 - w) * l2(p, q) <= 0) + optimize!(model) + return +end + +# large_expressions + +function perf_nl_model_large_expressions() + N = 50_000 + model = Model(Ipopt.Optimizer) + set_silent(model) + set_attribute(model, "max_iter", 1) + @variable(model, y[1:N], start = 1) + @variable(model, z[1:N]) + @NLobjective(model, Max, sum(2z[i]^2 + sin(1 / y[i]) for i in 1:N)) + @NLconstraint( + model, + [i = 1:N], + ifelse(z[i] <= y[i]^3, log(y[i] / i), z[i] / cos(y[i])) <= 42, + ) + @NLconstraint(model, sum(z[i]^i + log(y[i]) for i in 1:N) == 0) + optimize!(model) + return +end + +function perf_nlexpr_model_large_expressions() + N = 50_000 + model = Model(Ipopt.Optimizer) + set_silent(model) + set_attribute(model, "max_iter", 1) + @variable(model, y[1:N], start = 1) + @variable(model, z[1:N]) + @objective(model, Max, sum(2z[i]^2 + sin(1 / y[i]) for i in 1:N)) + @constraint( + model, + [i = 1:N], + ifelse(z[i] <= y[i]^3, log(y[i] / i), z[i] / cos(y[i])) <= 42, + ) + @constraint(model, sum(z[i]^i + log(y[i]) for i in 1:N) == 0) + optimize!(model) + return +end + +# large_expressions_2 + +function perf_nl_model_large_expressions_2() + N = 100 + model = Model(Ipopt.Optimizer) + set_silent(model) + set_attribute(model, "max_iter", 1) + @variable(model, y[1:N], start = 1) + @variable(model, z[1:N]) + @NLobjective(model, Max, sum(2z[i]^2 + sin(1 / y[i]) for i in 1:N)) + @NLconstraint( + model, + prod( + ifelse(z[i] 
<= y[i]^3, log(y[i] / i), z[i] / cos(y[i])) for i in 1:N + ) <= 42 + ) + @NLconstraint(model, sum(z[i]^i + log(y[i]) for i in 1:N) == 0) + optimize!(model) + return +end + +function perf_nlexpr_model_large_expressions_2() + N = 100 + model = Model(Ipopt.Optimizer) + set_silent(model) + set_attribute(model, "max_iter", 1) + @variable(model, y[1:N], start = 1) + @variable(model, z[1:N]) + @objective(model, Max, sum(2z[i]^2 + sin(1 / y[i]) for i in 1:N)) + @constraint( + model, + prod( + ifelse(z[i] <= y[i]^3, log(y[i] / i), z[i] / cos(y[i])) for i in 1:N + ) <= 42 + ) + @constraint(model, sum(z[i]^i + log(y[i]) for i in 1:N) == 0) + optimize!(model) + return +end + +end # module diff --git a/test/test_nlp_expr.jl b/test/test_nlp_expr.jl index 3af788cd72d..87eec9a8b14 100644 --- a/test/test_nlp_expr.jl +++ b/test/test_nlp_expr.jl @@ -95,7 +95,7 @@ function test_extension_flatten_nary( expr_plus = GenericNonlinearExpr{VariableRefType}(:+, Any[x]) expr_mult = GenericNonlinearExpr{VariableRefType}(:*, Any[x]) expr_sin = GenericNonlinearExpr{VariableRefType}(:sin, Any[x]) - to_string(x) = string(flatten(x)) + to_string(x) = string(flatten!(x)) @test to_string(+(expr_plus, 1)) == "x + 1.0" @test to_string(+(1, expr_plus)) == "1.0 + x" @test to_string(+(expr_plus, x)) == "x + x" @@ -138,7 +138,7 @@ function test_extension_latex(ModelType = Model, VariableRefType = VariableRef) @test function_string(MIME("text/plain"), sin(x)) == "sin(x)" @expression(model, g, ifelse(x > 0, sin(x), x + cos(x)^2)) @test function_string(MIME("text/latex"), g) == - raw"\textsf{ifelse}\left({\left({x} > {0}\right)}, {\textsf{sin}\left({x}\right)}, {\left({x} + {\left({\textsf{cos}\left({x}\right)} ^ {2.0}\right)}\right)}\right)" + raw"\textsf{ifelse}\left({x} > {0}, {\textsf{sin}\left({x}\right)}, {x} + {\left({\textsf{cos}\left({x}\right)} ^ {2.0}\right)}\right)" return end @@ -476,6 +476,16 @@ function test_register_univariate() return end +function test_register_eval_non_jump() + model = Model() + @variable(model, x) + @register(model, f, 1, x -> x^2) + @test f(2.0) == 4.0 + @register(model, g, 2, (x, y) -> x^2 - sin(y)) + @test g(2.0, 3.0) == 4.0 - sin(3.0) + return +end + function test_register_univariate_gradient() model = Model() @variable(model, x) @@ -591,7 +601,7 @@ function test_value_expression() y = QuadExpr(x + 1) @test value(f, my_foo(y)) ≈ (value(f, y) - 1)^2 @test value(f, my_bar(2.2, x)) ≈ sqrt(2.2 - 1.1) - bad_udf = UserDefinedFunction(:bad_udf) + bad_udf = UserDefinedFunction(:bad_udf, f) @test_throws( ErrorException( "Unable to evaluate nonlinear operator bad_udf because it is not " * @@ -635,7 +645,14 @@ end function test_operate_shortcut_ma_operate!!_add_mul() model = Model() @variable(model, x) - @expression(model, sum(sin(x) for i in 1:3)) + @test isequal_canonical( + @expression(model, sum(sin(x) for i in 1:3)), + NonlinearExpr(:+, Any[sin(x), sin(x), sin(x)]), + ) + @test isequal_canonical(JuMP._MA.add_mul(sin(x), 2, x), sin(x) + 2x) + @test isequal_canonical(JuMP._MA.add_mul(sin(x), 2, x, 2), sin(x) + 4x) + @test isequal_canonical(JuMP._MA.sub_mul(sin(x), 2, x), sin(x) - 2x) + @test isequal_canonical(JuMP._MA.sub_mul(sin(x), 2, x, 2), sin(x) - 4x) return end @@ -649,7 +666,7 @@ function test_show_nonlinear_model() return end -function test_error_both_nl_interfaces() +function test_error_both_nl_interfaces_constraint() model = Model() @variable(model, x) @constraint(model, log(x) <= 1) @@ -666,6 +683,23 @@ function test_error_both_nl_interfaces() return end +function 
test_error_both_nl_interfaces_objective()
+    model = Model()
+    @variable(model, x)
+    @objective(model, Max, log(x))
+    @NLconstraint(model, log(x) <= 1)
+    @test_throws(
+        ErrorException(
+            "Cannot optimize a model which contains the features from " *
+            "both the legacy (macros beginning with `@NL`) and new " *
+            "(`NonlinearExpr`) nonlinear interfaces. You must use one or " *
+            "the other.",
+        ),
+        optimize!(model),
+    )
+    return
+end
+
 function test_VectorNonlinearFunction_moi_function()
     model = Model()
     @variable(model, x)
@@ -717,4 +751,46 @@ function test_VectorNonlinearFunction_objective()
     return
 end
 
+function test_operator_overload_complex_error()
+    model = Model()
+    @variable(model, x)
+    f = (1 + 2im) * x
+    @test_throws(
+        ErrorException(
+            "Cannot build `GenericNonlinearExpr` because a term is complex-" *
+            "valued: `($f)::$(typeof(f))`",
+        ),
+        sin(f),
+    )
+    @test_throws(
+        ErrorException(
+            "Cannot build `GenericNonlinearExpr` because a term is complex-" *
+            "valued: `($(1 + 2im))::$(typeof(1 + 2im))`",
+        ),
+        +(sin(x), 1 + 2im),
+    )
+    @test_throws(
+        ErrorException(
+            "Cannot build `GenericNonlinearExpr` because a term is complex-" *
+            "valued: `($(1 + 2im))::$(typeof(1 + 2im))`",
+        ),
+        +(1 + 2im, sin(x)),
+    )
+    @test_throws(
+        ErrorException(
+            "Cannot build `GenericNonlinearExpr` because a term is complex-" *
+            "valued: `($f)::$(typeof(f))`",
+        ),
+        +(f, sin(x)),
+    )
+    @test_throws(
+        ErrorException(
+            "Cannot build `GenericNonlinearExpr` because a term is complex-" *
+            "valued: `($f)::$(typeof(f))`",
+        ),
+        +(sin(x), f),
+    )
+    return
+end
+
 end # module

From d2fbbc0caa05df484a867e938af51ab0f4783844 Mon Sep 17 00:00:00 2001
From: odow
Date: Wed, 23 Aug 2023 15:24:42 +1200
Subject: [PATCH 09/23] Add electricity consumption example

Fix docs
---
 .../tutorials/nonlinear/complementarity.jl    | 72 +++++++++++++++++++
 docs/styles/Vocab/JuMP-Vocab/accept.txt       |  2 +
 2 files changed, 74 insertions(+)

diff --git a/docs/src/tutorials/nonlinear/complementarity.jl b/docs/src/tutorials/nonlinear/complementarity.jl
index 7495f8fbba6..ba3b65aea2c 100644
--- a/docs/src/tutorials/nonlinear/complementarity.jl
+++ b/docs/src/tutorials/nonlinear/complementarity.jl
@@ -140,3 +140,75 @@ set_silent(model)
 optimize!(model)
 Test.@test isapprox(value(C_G), 0.996; atol = 1e-3) #src
 value(K)
+
+# ## Electricity consumption
+
+# This example is a mixed complementarity formulation of example 3.3.1 from
+# D’Aertrycke, G., Ehrenmann, A., Ralph, D., & Smeers, Y. (2017). [Risk trading
+# in capacity equilibrium models](https://doi.org/10.17863/CAM.17552).
+
+# This example models a risk-neutral competitive equilibrium between a producer
+# and a consumer of electricity.
+
+# In our example, we assume a producer is looking to invest in a new power
+# plant with capacity ``x`` [MW]. This plant has an annualized capital cost of
+# ``I`` [€/MW] and an operating cost of ``C`` [€/MWh]. There are 8760 hours in a
+# year.
+
+I, C, τ = 90_000, 60, 8_760
+
+# After making the capital investment, there are five possible consumption
+# scenarios, ``\omega``, which occur with probability ``\theta_\omega``. In each
+# scenario , the producer makes ``Y\_ω`` MW of electricity.
+
+θ = [0.2, 0.2, 0.2, 0.2, 0.2]
+
+# There is one consumer in the model, who has a quadratic utility function,
+# ``U(Q_ω) = A_ω Q_ω + \frac{B_ω Q_ω^2}{2}``.
+
+A, B = [300, 350, 400, 450, 500], 1
+
+# We now build and solve the mixed complementarity problem with a few brief
+# comments. 
The economic justification for the model would require a larger +# tutorial than the space available here. Consult the [original text](https://doi.org/10.17863/CAM.17552) +# for details. + +model = Model(PATHSolver.Optimizer) +set_silent(model) +## Capital investment +@variable(model, x >= 0, start = 1) +## Consumption in each scenario +@variable(model, Q[ω = 1:5] >= 0, start = 1) +## Production in each scenario +@variable(model, Y[ω = 1:5] >= 0, start = 1) +## Electricity price in each scenario +@variable(model, P[ω = 1:5], start = 1) +## Capital scarcity margin +@variable(model, μ[ω = 1:5] >= 0, start = 1) +## Capital investment must by paid for by expected annualized scarcity margin +@constraint(model, I - τ * θ' * μ ⟂ x) +## Producer's costs complement production +@constraint(model, [ω = 1:5], C - (P[ω] - μ[ω]) ⟂ Y[ω]) +## Consumer's utilitiy complements consumption +@constraint(model, [ω = 1:5], P[ω] - (A[ω] - B * Q[ω]) ⟂ Q[ω]) +## Production equals consummption +@constraint(model, [ω = 1:5], Y[ω] - Q[ω] ⟂ P[ω]) +## Capacity constraint +@constraint(model, [ω = 1:5], x - Y[ω] ⟂ μ[ω]) +optimize!(model) +solution_summary(model) + +# An equilibrium solution is to build 389 MW: + +Test.@test isapprox(value(x), 389; atol = 1) #src +value(x) + +# The production in each scenario is: + +Test.@test isapprox(value.(Q), [240, 290, 340, 389, 389]; atol = 1) #src +value.(Q) + +# The price in each scenario is: + +Test.@test isapprox(value.(P), [60, 60, 60, 61, 111]; atol = 1) #src +value.(P) diff --git a/docs/styles/Vocab/JuMP-Vocab/accept.txt b/docs/styles/Vocab/JuMP-Vocab/accept.txt index 772fe8a1995..e9d34e56170 100644 --- a/docs/styles/Vocab/JuMP-Vocab/accept.txt +++ b/docs/styles/Vocab/JuMP-Vocab/accept.txt @@ -159,6 +159,7 @@ Coey Dantzig Das Dvorkin +Ehrenmann Ehrgott Erlangen Farkas @@ -210,6 +211,7 @@ Schur Schwarz Shabbir Shuvomoy +Smeers Soodeh Steuer Stigler From ae8634fdb7d7b150f74cf3d828074d4f05bc2ed9 Mon Sep 17 00:00:00 2001 From: odow Date: Thu, 24 Aug 2023 09:02:27 +1200 Subject: [PATCH 10/23] Switch to name kwarg in add_user_defined_function Update tests Update docs More updates --- docs/src/manual/nonlinear.md | 8 +-- .../tutorials/nonlinear/complementarity.jl | 66 +++++++++---------- src/nlp_expr.jl | 59 ++++++++++------- test/test_nlp_expr.jl | 13 +++- 4 files changed, 79 insertions(+), 67 deletions(-) diff --git a/docs/src/manual/nonlinear.md b/docs/src/manual/nonlinear.md index bf361692761..bbe13e82fda 100644 --- a/docs/src/manual/nonlinear.md +++ b/docs/src/manual/nonlinear.md @@ -357,9 +357,9 @@ using JuMP square(x) = x^2 f(x, y) = (x - 1)^2 + (y - 2)^2 model = Model(); -udf_square = add_user_defined_function(model, :udf_square, 1, square) +udf_square = add_user_defined_function(model, 1, square; name = :udf_square) model[:udf_square] = udf_square -udf_f = add_user_defined_function(model, :udf_f, 2, f) +udf_f = add_user_defined_function(model, 2, f; name = :udf_f) model[:udf_f] = udf_f @variable(model, x[1:2]); @objective(model, Min, udf_f(x[1], udf_square(x[2]))) @@ -374,7 +374,7 @@ julia> @register(model, square, 1, square) ``` will error because it is equivalent to: ```julia -julia> square = add_user_defined_function(model, :square, 1, square) +julia> square = add_user_defined_function(model, 1, square; name = :square) ERROR: invalid redefinition of constant square Stacktrace: [...] 
@@ -422,7 +422,7 @@ To force JuMP to treat `f` as a user-defined function and not trace it, register the function using [`add_user_defined_function`](@ref) and define a new method which manually creates a [`NonlinearExpr`](@ref): ```jldoctest nonlinear_invalid_redefinition -julia> _ = add_user_defined_function(model, :f, 1, f) +julia> _ = add_user_defined_function(model, 1, f; name = :f) UserDefinedFunction{typeof(f)}(:f, f) julia> f(x::AbstractJuMPScalar) = NonlinearExpr(:f, Any[x]) diff --git a/docs/src/tutorials/nonlinear/complementarity.jl b/docs/src/tutorials/nonlinear/complementarity.jl index ba3b65aea2c..e407757020e 100644 --- a/docs/src/tutorials/nonlinear/complementarity.jl +++ b/docs/src/tutorials/nonlinear/complementarity.jl @@ -18,15 +18,15 @@ import Test #src # A mixed complementarity problem has the form: # ```math # \begin{align} -# \;\;\text{s.t.} & F_i(x) \perp x_i & i = 1 \ldots n \\ -# & l_i \le x_i \le u_i & i = 1 \ldots n. +# F_i(x) \perp x_i & i = 1 \ldots n \\ +# l_i \le x_i \le u_i & i = 1 \ldots n. # \end{align} # ``` # where the ``\perp`` constraint enforces the following relations: # -# - If ``l_i < x_i < u_i``, then ``F_i(x) == 0`` -# - If ``l_i == x_i``, then ``F_i(x) \ge 0`` -# - If ``x_i == u_i``, then ``F_i(x) \le 0`` +# - If ``l_i < x_i < u_i``, then ``F_i(x) = 0`` +# - If ``l_i = x_i``, then ``F_i(x) \ge 0`` +# - If ``x_i = u_i``, then ``F_i(x) \le 0`` # You may have seen a complementarity problem written as # ``0 \le F(x) \perp x \ge 0``. This is a special case of a mixed @@ -38,8 +38,7 @@ import Test #src # ## Linear complementarity # Form a mixed complementarity problem using the perp symbol `⟂` (type -# `\perp` in the REPL). See [Complementarity constraints](@ref) for the -# definition of a complementarity constraint. +# `\perp` in the REPL). M = [0 0 -1 -1; 0 0 1 -2; 1 -1 2 -2; 1 2 -2 4] q = [2, 2, -2, -6] @@ -54,8 +53,8 @@ value.(x) # ## Other ways of writing linear complementarity problems # You do not need to use a single vector of variables, and the complementarity -# constraints can be given in any order. In addition, you can either use the -# perp symbol, or you can use the [`MOI.Complements`](@ref) set. +# constraints can be given in any order. In addition, you can use the perp +# symbol, the `complements(F, x)` syntax, or the [`MOI.Complements`](@ref) set. model = Model(PATHSolver.Optimizer) set_silent(model) @@ -63,8 +62,8 @@ set_silent(model) @variable(model, 0 <= x <= 10, start = 0) @variable(model, 0 <= y <= 10, start = 0) @variable(model, 0 <= z <= 10, start = 0) -@constraint(model, [y - 2z + 2, x] in MOI.Complements(2)) -@constraint(model, -y - z + 2 ⟂ w) +@constraint(model, complements(y - 2z + 2, x)) +@constraint(model, [-y - z + 2, w] in MOI.Complements(2)) @constraint(model, w + 2x - 2y + 4z - 6 ⟂ z) @constraint(model, w - x + 2y - 2z - 2 ⟂ y) optimize!(model) @@ -154,46 +153,41 @@ value(K) # plant with capacity ``x`` [MW]. This plant has an annualized capital cost of # ``I`` [€/MW] and an operating cost of ``C`` [€/MWh]. There are 8760 hours in a # year. - -I, C, τ = 90_000, 60, 8_760 - +# # After making the capital investment, there are five possible consumption # scenarios, ``\omega``, which occur with probability ``\theta_\omega``. In each -# scenario , the producer makes ``Y\_ω`` MW of electricity. - -θ = [0.2, 0.2, 0.2, 0.2, 0.2] - +# scenario , the producer makes ``Y_ω`` MW of electricity. +# # There is one consumer in the model, who has a quadratic utility function, # ``U(Q_ω) = A_ω Q_ω + \frac{B_ω Q_ω^2}{2}``. 
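For reference, the mixed complementarity system assembled by the code below is, written out in full (restated here directly from the constraints, with ``x, Q_\omega, Y_\omega, \mu_\omega \ge 0`` and ``P_\omega`` free):

```math
\begin{aligned}
I - \tau \sum_{\omega=1}^{5} \theta_\omega \mu_\omega &\perp x \\
C - (P_\omega - \mu_\omega) &\perp Y_\omega, && \omega = 1, \ldots, 5 \\
P_\omega - (A_\omega - B Q_\omega) &\perp Q_\omega, && \omega = 1, \ldots, 5 \\
Y_\omega - Q_\omega &\perp P_\omega, && \omega = 1, \ldots, 5 \\
x - Y_\omega &\perp \mu_\omega, && \omega = 1, \ldots, 5
\end{aligned}
```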
- -A, B = [300, 350, 400, 450, 500], 1 - +# # We now build and solve the mixed complementarity problem with a few brief # comments. The economic justification for the model would require a larger # tutorial than the space available here. Consult the [original text](https://doi.org/10.17863/CAM.17552) # for details. +I = 90_000 # Annualized capital cost +C = 60 # Operation cost per MWh +τ = 8_760 # Hours per year +θ = [0.2, 0.2, 0.2, 0.2, 0.2] # Scenario probabilities +A = [300, 350, 400, 450, 500] # Utility function coefficients +B = 1 # Utility function coefficients model = Model(PATHSolver.Optimizer) set_silent(model) -## Capital investment -@variable(model, x >= 0, start = 1) -## Consumption in each scenario -@variable(model, Q[ω = 1:5] >= 0, start = 1) -## Production in each scenario -@variable(model, Y[ω = 1:5] >= 0, start = 1) -## Electricity price in each scenario -@variable(model, P[ω = 1:5], start = 1) -## Capital scarcity margin -@variable(model, μ[ω = 1:5] >= 0, start = 1) -## Capital investment must by paid for by expected annualized scarcity margin +@variable(model, x >= 0, start = 1) # Installed capacity +@variable(model, Q[ω = 1:5] >= 0, start = 1) # Consumption +@variable(model, Y[ω = 1:5] >= 0, start = 1) # Production +@variable(model, P[ω = 1:5], start = 1) # Electricity price +@variable(model, μ[ω = 1:5] >= 0, start = 1) # Capital scarcity margin +## Unit investment cost equals annualized scarcity profit or investment is 0 @constraint(model, I - τ * θ' * μ ⟂ x) -## Producer's costs complement production +## Difference between price and scarcity margin is equal to operation cost @constraint(model, [ω = 1:5], C - (P[ω] - μ[ω]) ⟂ Y[ω]) -## Consumer's utilitiy complements consumption +## Price is equal to consumer's marginal utility @constraint(model, [ω = 1:5], P[ω] - (A[ω] - B * Q[ω]) ⟂ Q[ω]) -## Production equals consummption +## Production is equal to consumption @constraint(model, [ω = 1:5], Y[ω] - Q[ω] ⟂ P[ω]) -## Capacity constraint +## Production does not exceed capacity @constraint(model, [ω = 1:5], x - Y[ω] ⟂ μ[ω]) optimize!(model) solution_summary(model) diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl index 38987667972..381f9b46123 100644 --- a/src/nlp_expr.jl +++ b/src/nlp_expr.jl @@ -841,15 +841,15 @@ end """ add_user_defined_function( model::Model, - op::Symbol, dim::Int, f::Function, [∇f::Function,] - [∇²f::Function,] + [∇²f::Function]; + [name::Symbol = Symbol(f),] ) -Add a user-definend function with `dim` input arguments to `model` and associate -it with the operator `op`. +Add a user-defined function with `dim` input arguments to `model` and associate +it with the operator `name`. The function `f` evaluates the function. 
The optional function `∇f` evaluates the first derivative, and the optional function `∇²f` evaluates the second @@ -872,11 +872,11 @@ julia> ∇f(x::Float64) = 2 * x julia> ∇²f(x::Float64) = 2.0 ∇²f (generic function with 1 method) -julia> udf_f = add_user_defined_function(model, :udf_f, 1, f, ∇f, ∇²f) -UserDefinedFunction{typeof(f)}(:udf_f, f) +julia> udf_f = add_user_defined_function(model, 1, f, ∇f, ∇²f) +UserDefinedFunction{typeof(f)}(:f, f) julia> @objective(model, Min, udf_f(x)) -udf_f(x) +f(x) julia> udf_f(2.0) 4.0 @@ -884,24 +884,34 @@ julia> udf_f(2.0) """ function add_user_defined_function( model::GenericModel, - op::Symbol, dim::Int, - args::Vararg{Function,N}, + f::Function, + args::Vararg{Function,N}; + name::Symbol = Symbol(f), ) where {N} - if !(1 <= N <= 3) + nargs = 1 + N + if !(1 <= nargs <= 3) error( - "Unable to register user-defined function $op: invalid number of " * - "functions provided. Got $N, but expected 1 (if function only), " * - "2 (if function and gradient), or 3 (if function, gradient, and " * - "hesssian provided)", + "Unable to register user-defined function $name: invalid number " * + "of functions provided. Got $nargs, but expected 1 (if function " * + "only), 2 (if function and gradient), or 3 (if function, " * + "gradient, and hesssian provided)", ) end # TODO(odow): we could add other checks here, but we won't for now because # down-stream solvers in MOI can add their own checks, and any solver using # MOI.Nonlinear will automatically check for autodiff and common mistakes # and throw a nice informative error. - MOI.set(model, MOI.UserDefinedFunction(op, dim), args) - return UserDefinedFunction(op, args[1]) + MOI.set(model, MOI.UserDefinedFunction(name, dim), tuple(f, args...)) + return UserDefinedFunction(name, f) +end + +function add_user_defined_function(::GenericModel, ::Int; kwargs...) + return error( + "Unable to register user-defined function because no functions were " * + "provided. Expected 1 (if function only), 2 (if function and " * + "gradient), or 3 (if function, gradient, and hesssian provided)", + ) end """ @@ -965,20 +975,19 @@ julia> model = Model(); julia> f(x) = x^2 f (generic function with 1 method) -julia> udf_f = model[:udf_f] = add_user_defined_function(model, :udf_f, 1, f) +julia> udf_f = model[:udf_f] = add_user_defined_function(model, 1, f; name = :udf_f) UserDefinedFunction{typeof(f)}(:udf_f, f) ``` """ macro register(model, op, args...) - code = Expr( - :call, - add_user_defined_function, - esc(model), - Meta.quot(op), - esc.(args)..., - ) return _macro_assign_and_return( - code, + quote + add_user_defined_function( + $(esc(model)), + $(esc.(args)...); + name = $(Meta.quot(op)), + ) + end, gensym(), op; model_for_registering = esc(model), diff --git a/test/test_nlp_expr.jl b/test/test_nlp_expr.jl index 87eec9a8b14..2d661ea8637 100644 --- a/test/test_nlp_expr.jl +++ b/test/test_nlp_expr.jl @@ -553,14 +553,23 @@ end function test_register_errors() model = Model() + @test_throws( + ErrorException( + "Unable to register user-defined function because no functions were " * + "provided. Expected 1 (if function only), 2 (if function and " * + "gradient), or 3 (if function, gradient, and hesssian provided)", + ), + @register(model, foo, 2), + ) + f = x -> x^2 @test_throws( ErrorException( "Unable to register user-defined function foo: invalid number of " * - "functions provided. Got 0, but expected 1 (if function only), " * + "functions provided. 
Got 4, but expected 1 (if function only), " * "2 (if function and gradient), or 3 (if function, gradient, and " * "hesssian provided)", ), - @register(model, foo, 2), + @register(model, foo, 2, f, f, f, f), ) return end From 509401797697476f59dd9ead89241ed5b6511aa0 Mon Sep 17 00:00:00 2001 From: odow Date: Fri, 25 Aug 2023 13:14:13 +1200 Subject: [PATCH 11/23] Rename UserDefinedFunction to NonlinearOperator --- docs/src/manual/nonlinear.md | 63 +++---- .../tutorials/applications/power_systems.jl | 4 +- .../tutorials/nonlinear/nested_problems.jl | 8 +- .../tutorials/nonlinear/tips_and_tricks.jl | 16 +- .../nonlinear/user_defined_hessians.jl | 4 +- src/macros.jl | 103 ++++++----- src/nlp_expr.jl | 174 ++++++++++-------- src/variables.jl | 4 +- test/test_nlp_expr.jl | 7 +- 9 files changed, 203 insertions(+), 180 deletions(-) diff --git a/docs/src/manual/nonlinear.md b/docs/src/manual/nonlinear.md index bbe13e82fda..0c87243b58d 100644 --- a/docs/src/manual/nonlinear.md +++ b/docs/src/manual/nonlinear.md @@ -258,18 +258,15 @@ julia> expr = @expression(model, ifelse(x < -1 || x >= 1, x^2, 0.0)) ifelse((x < -1) || (x >= 1), x², 0.0) ``` -As an alternative, use the `JuMP.nonlinear_` functions, which fallback to the +As an alternative, use the `JuMP.op_` functions, which fallback to the various comparison and logical operators: ```jldoctest julia> model = Model(); julia> @variable(model, x); -julia> expr = nonlinear_ifelse( - nonlinear_or( - nonlinear_less_than(x, -1), - nonlinear_greater_equal(x, 1) - ), +julia> expr = op_ifelse( + op_or(op_less_than(x, -1), op_greater_equal(x, 1)), x^2, 0.0, ) @@ -280,14 +277,14 @@ The available functions are: | JuMP function | Julia function | | :-------------------------------- | :------------- | -| [`nonlinear_ifelse`](@ref) | `ifelse` | -| [`nonlinear_and`](@ref) | `&&` | -| [`nonlinear_or`](@ref) | `\|\|` | -| [`nonlinear_greater_than`](@ref) | `>` | -| [`nonlinear_greater_equal`](@ref) | `>=` | -| [`nonlinear_less_than`](@ref) | `<` | -| [`nonlinear_less_equal`](@ref) | `<=` | -| [`nonlinear_equal_to`](@ref) | `==` | +| [`op_ifelse`](@ref) | `ifelse` | +| [`op_and`](@ref) | `&&` | +| [`op_or`](@ref) | `\|\|` | +| [`op_greater_than`](@ref) | `>` | +| [`op_greater_equal`](@ref) | `>=` | +| [`op_less_than`](@ref) | `<` | +| [`op_less_equal`](@ref) | `<=` | +| [`op_equal_to`](@ref) | `==` | ### Fields @@ -327,10 +324,10 @@ using JuMP square(x) = x^2 f(x, y) = (x - 1)^2 + (y - 2)^2 model = Model(); -@register(model, udf_square, 1, square) -@register(model, udf_f, 2, f) +@register(model, op_square, 1, square) +@register(model, op_f, 2, f) @variable(model, x[1:2]); -@objective(model, Min, udf_f(x[1], udf_square(x[2]))) +@objective(model, Min, op_f(x[1], op_square(x[2]))) ``` The arguments to [`@register`](@ref) are: @@ -349,7 +346,7 @@ The arguments to [`@register`](@ref) are: ### Registered functions without macros The [`@register`](@ref) macro is syntactic sugar for the -[`add_user_defined_function`](@ref) method. Thus, the non-macro version of the +[`register_nonlinear_operator`](@ref) method. 
Thus, the non-macro version of the preceding example is: ```@repl @@ -357,12 +354,12 @@ using JuMP square(x) = x^2 f(x, y) = (x - 1)^2 + (y - 2)^2 model = Model(); -udf_square = add_user_defined_function(model, 1, square; name = :udf_square) -model[:udf_square] = udf_square -udf_f = add_user_defined_function(model, 2, f; name = :udf_f) -model[:udf_f] = udf_f +op_square = register_nonlinear_operator(model, 1, square; name = :op_square) +model[:op_square] = op_square +op_f = register_nonlinear_operator(model, 2, f; name = :op_f) +model[:op_f] = op_f @variable(model, x[1:2]); -@objective(model, Min, udf_f(x[1], udf_square(x[2]))) +@objective(model, Min, op_f(x[1], op_square(x[2]))) ``` This has two important consequences. @@ -374,7 +371,7 @@ julia> @register(model, square, 1, square) ``` will error because it is equivalent to: ```julia -julia> square = add_user_defined_function(model, 1, square; name = :square) +julia> square = register_nonlinear_operator(model, 1, square; name = :square) ERROR: invalid redefinition of constant square Stacktrace: [...] @@ -388,8 +385,8 @@ Second, you can obtain a reference to the user-defined function using the using JuMP square(x) = x^2 model = Model(); -@register(model, udf_square, 1, square) -udf_square_2 = model[:udf_square] +@register(model, op_square, 1, square) +op_square_2 = model[:op_square] ``` ### Invalid redefinition of constant @@ -419,11 +416,11 @@ x² ``` To force JuMP to treat `f` as a user-defined function and not trace it, register -the function using [`add_user_defined_function`](@ref) and define a new method +the function using [`register_nonlinear_operator`](@ref) and define a new method which manually creates a [`NonlinearExpr`](@ref): ```jldoctest nonlinear_invalid_redefinition -julia> _ = add_user_defined_function(model, 1, f; name = :f) -UserDefinedFunction{typeof(f)}(:f, f) +julia> _ = register_nonlinear_operator(model, 1, f; name = :f) +NonlinearOperator(:f, f) julia> f(x::AbstractJuMPScalar) = NonlinearExpr(:f, Any[x]) f (generic function with 2 methods) @@ -451,9 +448,9 @@ f(x) = x^2 ∇f(x) = 2x ∇²f(x) = 2 model = Model(); -@register(model, udf_f, 1, f, ∇f, ∇²f) # Providing ∇²f is optional +@register(model, op_f, 1, f, ∇f, ∇²f) # Providing ∇²f is optional @variable(model, x) -@objective(model, Min, udf_f(x)) +@objective(model, Min, op_f(x)) ``` #### Multivariate functions @@ -511,8 +508,8 @@ using JuMP model = Model(); @variable(model, x[1:5]) f(x::Vector) = sum(x[i]^i for i in 1:length(x)) -@register(model, udf_f, 5, (x...) -> f(collect(x))) -@objective(model, Min, udf_f(x...)) +@register(model, op_f, 5, (x...) 
-> f(collect(x))) +@objective(model, Min, op_f(x...)) ``` ### Automatic differentiation diff --git a/docs/src/tutorials/applications/power_systems.jl b/docs/src/tutorials/applications/power_systems.jl index 28b75c37d32..ddf11753127 100644 --- a/docs/src/tutorials/applications/power_systems.jl +++ b/docs/src/tutorials/applications/power_systems.jl @@ -513,14 +513,14 @@ function solve_nonlinear_economic_dispatch( if silent set_silent(model) end - @register(model, udf_tcf, 1, thermal_cost_function) + @register(model, op_tcf, 1, thermal_cost_function) N = length(generators) @variable(model, generators[i].min <= g[i = 1:N] <= generators[i].max) @variable(model, 0 <= w <= scenario.wind) @objective( model, Min, - sum(generators[i].variable_cost * udf_tcf(g[i]) for i in 1:N) + + sum(generators[i].variable_cost * op_tcf(g[i]) for i in 1:N) + wind.variable_cost * w, ) @constraint(model, sum(g[i] for i in 1:N) + sqrt(w) == scenario.demand) diff --git a/docs/src/tutorials/nonlinear/nested_problems.jl b/docs/src/tutorials/nonlinear/nested_problems.jl index 421a0b19039..cf20ac1b0a5 100644 --- a/docs/src/tutorials/nonlinear/nested_problems.jl +++ b/docs/src/tutorials/nonlinear/nested_problems.jl @@ -141,8 +141,8 @@ end model = Model(Ipopt.Optimizer) @variable(model, x[1:2] >= 0) -@register(model, udf_V, 2, V, ∇V, ∇²V) -@objective(model, Min, x[1]^2 + x[2]^2 + udf_V(x[1], x[2])) +@register(model, op_V, 2, V, ∇V, ∇²V) +@objective(model, Min, x[1]^2 + x[2]^2 + op_V(x[1], x[2])) optimize!(model) solution_summary(model) @@ -215,13 +215,13 @@ model = Model(Ipopt.Optimizer) cache = Cache(Float64[], NaN, Float64[]) @register( model, - udf_V, + op_V, 2, (x...) -> cached_f(cache, x...), (g, x...) -> cached_∇f(cache, g, x...), (H, x...) -> cached_∇²f(cache, H, x...), ) -@objective(model, Min, x[1]^2 + x[2]^2 + udf_V(x[1], x[2])) +@objective(model, Min, x[1]^2 + x[2]^2 + op_V(x[1], x[2])) optimize!(model) solution_summary(model) diff --git a/docs/src/tutorials/nonlinear/tips_and_tricks.jl b/docs/src/tutorials/nonlinear/tips_and_tricks.jl index a3daf79b270..48e7fe6d30a 100644 --- a/docs/src/tutorials/nonlinear/tips_and_tricks.jl +++ b/docs/src/tutorials/nonlinear/tips_and_tricks.jl @@ -46,10 +46,10 @@ foo_2(x, y) = foo(x, y)[2] model = Model(Ipopt.Optimizer) set_silent(model) @variable(model, x[1:2] >= 0, start = 0.1) -@register(model, udf_foo_1, 2, foo_1) -@register(model, udf_foo_2, 2, foo_2) -@objective(model, Max, udf_foo_1(x[1], x[2])) -@constraint(model, udf_foo_2(x[1], x[2]) <= 2) +@register(model, op_foo_1, 2, foo_1) +@register(model, op_foo_2, 2, foo_2) +@objective(model, Max, op_foo_1(x[1], x[2])) +@constraint(model, op_foo_2(x[1], x[2]) <= 2) function_calls = 0 optimize!(model) Test.@test objective_value(model) ≈ √3 atol = 1e-4 @@ -114,10 +114,10 @@ println("function_calls = ", function_calls) model = Model(Ipopt.Optimizer) set_silent(model) @variable(model, x[1:2] >= 0, start = 0.1) -@register(model, udf_foo_1, 2, memoized_foo[1]) -@register(model, udf_foo_2, 2, memoized_foo[2]) -@objective(model, Max, udf_foo_1(x[1], x[2])) -@constraint(model, udf_foo_2(x[1], x[2]) <= 2) +@register(model, op_foo_1, 2, memoized_foo[1]) +@register(model, op_foo_2, 2, memoized_foo[2]) +@objective(model, Max, op_foo_1(x[1], x[2])) +@constraint(model, op_foo_2(x[1], x[2]) <= 2) function_calls = 0 optimize!(model) Test.@test objective_value(model) ≈ √3 atol = 1e-4 diff --git a/docs/src/tutorials/nonlinear/user_defined_hessians.jl b/docs/src/tutorials/nonlinear/user_defined_hessians.jl index 75aa67ce8dc..9f49f33dc9b 100644 --- 
a/docs/src/tutorials/nonlinear/user_defined_hessians.jl +++ b/docs/src/tutorials/nonlinear/user_defined_hessians.jl @@ -69,7 +69,7 @@ end model = Model(Ipopt.Optimizer) @variable(model, x[1:2]) -@register(model, udf_rosenbrock, 2, rosenbrock, ∇rosenbrock, ∇²rosenbrock) -@objective(model, Min, udf_rosenbrock(x[1], x[2])) +@register(model, op_rosenbrock, 2, rosenbrock, ∇rosenbrock, ∇²rosenbrock) +@objective(model, Min, op_rosenbrock(x[1], x[2])) optimize!(model) solution_summary(model; verbose = true) diff --git a/src/macros.jl b/src/macros.jl index 92689525c77..69e558f6110 100644 --- a/src/macros.jl +++ b/src/macros.jl @@ -475,10 +475,11 @@ function parse_constraint_head( end """ - nonlinear_ifelse(a, x, y) + op_ifelse(a, x, y) -A function that falls back to `ifelse(a, x, y)`, but when called with JuMP -variables or expressions, returns a [`GenericNonlinearExpr`](@ref). +A function that falls back to `ifelse(a, x, y)`, but when called with a JuMP +variables or expression in the first argument, returns a +[`GenericNonlinearExpr`](@ref). ## Example @@ -487,19 +488,27 @@ julia> model = Model(); julia> @variable(model, x); -julia> nonlinear_ifelse(true, 1.0, 2.0) +julia> op_ifelse(true, 1.0, 2.0) 1.0 -julia> nonlinear_ifelse(x, 1.0, 2.0) +julia> op_ifelse(x, 1.0, 2.0) ifelse(x, 1.0, 2.0) + +julia> op_ifelse(true, x, 2.0) +x ``` """ -nonlinear_ifelse(a, x, y) = ifelse(a, x, y) +op_ifelse(a, x, y) = ifelse(a, x, y) + +# We can't make this a generic `NonlinearOperator` because we only want to +# intercept `ifelse` if the first argument is an `AbstractJuMPScalar` (if it's a +# `Bool`, we want to return the correct branch). +op_ifelse(a::AbstractJuMPScalar, x, y) = NonlinearExpr(:ifelse, Any[a, x, y]) """ - nonlinear_and(x, y) + op_and(x, y) -A function that falls back to `x && y`, but when called with JuMP variables or +A function that falls back to `x & y`, but when called with JuMP variables or expressions, returns a [`GenericNonlinearExpr`](@ref). ## Example @@ -509,19 +518,21 @@ julia> model = Model(); julia> @variable(model, x); -julia> nonlinear_and(true, false) +julia> op_and(true, false) false -julia> nonlinear_and(true, x) +julia> op_and(true, x) true && x ``` """ -nonlinear_and(x, y) = x && y +const op_and = NonlinearOperator(:&&, &) +# Note that the function is `&` instead of `&&` because `&&` is special lowering +# syntax and is not a regular Julia function, but the MOI operator is `:&&`. """ - nonlinear_or(x, y) + op_or(x, y) -A function that falls back to `x || y`, but when called with JuMP variables or +A function that falls back to `x | y`, but when called with JuMP variables or expressions, returns a [`GenericNonlinearExpr`](@ref). ## Example @@ -531,17 +542,19 @@ julia> model = Model(); julia> @variable(model, x); -julia> nonlinear_or(true, false) +julia> op_or(true, false) true -julia> nonlinear_or(true, x) +julia> op_or(true, x) true || x ``` """ -nonlinear_or(x, y) = x || y +const op_or = NonlinearOperator(:||, |) +# Note that the function is `|` instead of `||` because `||` is special lowering +# syntax and is not a regular Julia function, but the MOI operator is `:||`. """ - nonlinear_less_than(x, y) + op_less_than(x, y) A function that falls back to `x < y`, but when called with JuMP variables or expressions, returns a [`GenericNonlinearExpr`](@ref). 
@@ -553,17 +566,17 @@ julia> model = Model(); julia> @variable(model, x); -julia> nonlinear_less_than(1, 2) +julia> op_less_than(1, 2) true -julia> nonlinear_less_than(x, 2) +julia> op_less_than(x, 2) x < 2 ``` """ -nonlinear_less_than(x, y) = x < y +const op_less_than = NonlinearOperator(:<, <) """ - nonlinear_greater_than(x, y) + op_greater_than(x, y) A function that falls back to `x > y`, but when called with JuMP variables or expressions, returns a [`GenericNonlinearExpr`](@ref). @@ -575,17 +588,17 @@ julia> model = Model(); julia> @variable(model, x); -julia> nonlinear_greater_than(1, 2) +julia> op_greater_than(1, 2) false -julia> nonlinear_greater_than(x, 2) +julia> op_greater_than(x, 2) x > 2 ``` """ -nonlinear_greater_than(x, y) = x > y +const op_greater_than = NonlinearOperator(:>, >) """ - nonlinear_less_equal(x, y) + op_less_equal(x, y) A function that falls back to `x <= y`, but when called with JuMP variables or expressions, returns a [`GenericNonlinearExpr`](@ref). @@ -597,17 +610,17 @@ julia> model = Model(); julia> @variable(model, x); -julia> nonlinear_less_equal(2, 2) +julia> op_less_equal(2, 2) true -julia> nonlinear_less_equal(x, 2) +julia> op_less_equal(x, 2) x <= 2 ``` """ -nonlinear_less_equal(x, y) = x <= y +const op_less_equal = NonlinearOperator(:<=, <=) """ - nonlinear_greater_equal(x, y) + op_greater_equal(x, y) A function that falls back to `x >= y`, but when called with JuMP variables or expressions, returns a [`GenericNonlinearExpr`](@ref). @@ -619,17 +632,17 @@ julia> model = Model(); julia> @variable(model, x); -julia> nonlinear_greater_equal(2, 2) +julia> op_greater_equal(2, 2) true -julia> nonlinear_greater_equal(x, 2) +julia> op_greater_equal(x, 2) x >= 2 ``` """ -nonlinear_greater_equal(x, y) = x >= y +const op_greater_equal = NonlinearOperator(:>=, >=) """ - nonlinear_equal_to(x, y) + op_equal_to(x, y) A function that falls back to `x == y`, but when called with JuMP variables or expressions, returns a [`GenericNonlinearExpr`](@ref). @@ -641,40 +654,40 @@ julia> model = Model(); julia> @variable(model, x); -julia> nonlinear_equal_to(2, 2) +julia> op_equal_to(2, 2) true -julia> nonlinear_equal_to(x, 2) +julia> op_equal_to(x, 2) x == 2 ``` """ -nonlinear_equal_to(x, y) = x == y +const op_equal_to = NonlinearOperator(:(==), ==) function _rewrite_to_jump_logic(x) if Meta.isexpr(x, :call) op = if x.args[1] == :ifelse - return Expr(:call, nonlinear_ifelse, x.args[2:end]...) + return Expr(:call, op_ifelse, x.args[2:end]...) elseif x.args[1] == :< - return Expr(:call, nonlinear_less_than, x.args[2:end]...) + return Expr(:call, op_less_than, x.args[2:end]...) elseif x.args[1] == :> - return Expr(:call, nonlinear_greater_than, x.args[2:end]...) + return Expr(:call, op_greater_than, x.args[2:end]...) elseif x.args[1] == :<= - return Expr(:call, nonlinear_less_equal, x.args[2:end]...) + return Expr(:call, op_less_equal, x.args[2:end]...) elseif x.args[1] == :>= - return Expr(:call, nonlinear_greater_equal, x.args[2:end]...) + return Expr(:call, op_greater_equal, x.args[2:end]...) elseif x.args[1] == :(==) - return Expr(:call, nonlinear_equal_to, x.args[2:end]...) + return Expr(:call, op_equal_to, x.args[2:end]...) end elseif Meta.isexpr(x, :||) - return Expr(:call, nonlinear_or, x.args...) + return Expr(:call, op_or, x.args...) elseif Meta.isexpr(x, :&&) - return Expr(:call, nonlinear_and, x.args...) + return Expr(:call, op_and, x.args...) 
elseif Meta.isexpr(x, :comparison) lhs = Expr(:call, x.args[2], x.args[1], x.args[3]) rhs = Expr(:call, x.args[4], x.args[3], x.args[5]) return Expr( :call, - nonlinear_and, + op_and, _rewrite_to_jump_logic(lhs), _rewrite_to_jump_logic(rhs), ) diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl index 381f9b46123..de80e868297 100644 --- a/src/nlp_expr.jl +++ b/src/nlp_expr.jl @@ -25,8 +25,8 @@ and the default list of supported multivariate operators is given by: * [`MOI.Nonlinear.DEFAULT_MULTIVARIATE_OPERATORS`](@ref) -Additional operators can be registered by setting a [`MOI.UserDefinedFunction`](@ref) -attribute. +Additional operators can be registered by setting a +[`MOI.UserDefinedFunction`](@ref) attribute. See the full list of operators supported by a [`MOI.ModelLike`](@ref) by querying [`MOI.ListOfSupportedNonlinearOperators`](@ref). @@ -234,8 +234,8 @@ function MOI.Nonlinear.parse_expression( if arg isa GenericNonlinearExpr _parse_without_recursion_inner(stack, data, expr, arg, parent_node) else - # We can use recursion here, because GenericNonlinearExpr only occur in - # other GenericNonlinearExpr. + # We can use recursion here, because GenericNonlinearExpr only occur + # in other GenericNonlinearExpr. MOI.Nonlinear.parse_expression(data, expr, arg, parent_node) end end @@ -470,33 +470,6 @@ function _needs_flatten(parent::GenericNonlinearExpr, arg::GenericNonlinearExpr) end end -function nonlinear_ifelse(a::AbstractJuMPScalar, x, y) - return GenericNonlinearExpr{variable_ref_type(a)}(:ifelse, Any[a, x, y]) -end - -for (f, op) in ( - :nonlinear_and => :&&, - :nonlinear_or => :||, - :nonlinear_less_than => :(<), - :nonlinear_greater_than => :(>), - :nonlinear_less_equal => :(<=), - :nonlinear_greater_equal => :(>=), - :nonlinear_equal_to => :(==), -) - op = Meta.quot(op) - @eval begin - function $(f)(x::AbstractJuMPScalar, y) - return GenericNonlinearExpr{variable_ref_type(x)}($op, x, y) - end - function $(f)(x, y::AbstractJuMPScalar) - return GenericNonlinearExpr{variable_ref_type(y)}($op, x, y) - end - function $(f)(x::AbstractJuMPScalar, y::AbstractJuMPScalar) - return GenericNonlinearExpr{variable_ref_type(x)}($op, x, y) - end - end -end - # JuMP interop function owner_model(expr::GenericNonlinearExpr) @@ -789,12 +762,19 @@ function _MA.promote_operation( end """ - UserDefinedFunction(head::Symbol, func::Function) + NonlinearOperator(head::Symbol, func::Function) -A struct representing a user-defined function named `head`. +A callable struct (functor) representing a function named `head`. -This function must have already been added to the model using -[`add_user_defined_function`](@ref) or [`@register`](@ref). +When called with [`AbstractJuMPScalar`](@ref)s, the struct returns a +[`GenericNonlinearExpr`](@ref). + +When called with non-JuMP types, the struct returns the evaluation of +`func(args...)`. + +Unless `head` is special-cased by the optimizer, the operator must have already +been added to the model using [`register_nonlinear_operator`](@ref) or +[`@register`](@ref). 
## Example @@ -813,25 +793,54 @@ julia> ∇f(x::Float64) = 2 * x julia> ∇²f(x::Float64) = 2.0 ∇²f (generic function with 1 method) -julia> @register(model, udf_f, 1, f, ∇f, ∇²f) -UserDefinedFunction{typeof(f)}(:udf_f, f) +julia> @register(model, op_f, 1, f, ∇f, ∇²f) +NonlinearOperator(:op_f, f) -julia> bar = UserDefinedFunction(:udf_f, f) -UserDefinedFunction{typeof(f)}(:udf_f, f) +julia> bar = NonlinearOperator(:op_f, f) +NonlinearOperator(:op_f, f) julia> @objective(model, Min, bar(x)) -udf_f(x) +op_f(x) julia> bar(2.0) 4.0 ``` """ -struct UserDefinedFunction{F} +struct NonlinearOperator{F} head::Symbol func::F end -function (f::UserDefinedFunction)(args...) +# Make it so that we don't print the complicated type parameter +function Base.show(io::IO, f::NonlinearOperator) + return print(io, "NonlinearOperator(:$(f.head), $(f.func))") +end + +# Fast overload for unary calls + +(f::NonlinearOperator)(x) = f.func(x) + +(f::NonlinearOperator)(x::AbstractJuMPScalar) = NonlinearExpr(f.head, Any[x]) + +# Fast overload for binary calls + +(f::NonlinearOperator)(x, y) = f.func(x, y) + +function (f::NonlinearOperator)(x::AbstractJuMPScalar, y) + return GenericNonlinearExpr(f.head, Any[x, y]) +end + +function (f::NonlinearOperator)(x, y::AbstractJuMPScalar) + return GenericNonlinearExpr(f.head, Any[x, y]) +end + +function (f::NonlinearOperator)(x::AbstractJuMPScalar, y::AbstractJuMPScalar) + return GenericNonlinearExpr(f.head, Any[x, y]) +end + +# Fallback for more arguments +function (f::NonlinearOperator)(x, y, z...) + args = (x, y, z...) if any(Base.Fix2(isa, AbstractJuMPScalar), args) return GenericNonlinearExpr(f.head, Any[a for a in args]) end @@ -839,7 +848,7 @@ function (f::UserDefinedFunction)(args...) end """ - add_user_defined_function( + register_nonlinear_operator( model::Model, dim::Int, f::Function, @@ -848,12 +857,15 @@ end [name::Symbol = Symbol(f),] ) -Add a user-defined function with `dim` input arguments to `model` and associate -it with the operator `name`. +Register a new nonlinear operator with `dim` input arguments to `model` and +associate it with the name `name`. + +The function `f` evaluates the function and must return a scalar. + +The optional function `∇f` evaluates the first derivative, and the optional +function `∇²f` evaluates the second derivative. -The function `f` evaluates the function. The optional function `∇f` evaluates -the first derivative, and the optional function `∇²f` evaluates the second -derivative. `∇²f` may be provided only if `∇f` is also provided. +`∇²f` may be provided only if `∇f` is also provided. ## Example @@ -872,17 +884,17 @@ julia> ∇f(x::Float64) = 2 * x julia> ∇²f(x::Float64) = 2.0 ∇²f (generic function with 1 method) -julia> udf_f = add_user_defined_function(model, 1, f, ∇f, ∇²f) -UserDefinedFunction{typeof(f)}(:f, f) +julia> op_f = register_nonlinear_operator(model, 1, f, ∇f, ∇²f) +NonlinearOperator(:f, f) -julia> @objective(model, Min, udf_f(x)) +julia> @objective(model, Min, op_f(x)) f(x) -julia> udf_f(2.0) +julia> op_f(2.0) 4.0 ``` """ -function add_user_defined_function( +function register_nonlinear_operator( model::GenericModel, dim::Int, f::Function, @@ -892,10 +904,10 @@ function add_user_defined_function( nargs = 1 + N if !(1 <= nargs <= 3) error( - "Unable to register user-defined function $name: invalid number " * - "of functions provided. 
Got $nargs, but expected 1 (if function " * - "only), 2 (if function and gradient), or 3 (if function, " * - "gradient, and hesssian provided)", + "Unable to register operator $name: invalid number of functions " * + "provided. Got $nargs, but expected 1 (if function only), 2 (if " * + "function and gradient), or 3 (if function, gradient, and " * + "hesssian provided)", ) end # TODO(odow): we could add other checks here, but we won't for now because @@ -903,22 +915,22 @@ function add_user_defined_function( # MOI.Nonlinear will automatically check for autodiff and common mistakes # and throw a nice informative error. MOI.set(model, MOI.UserDefinedFunction(name, dim), tuple(f, args...)) - return UserDefinedFunction(name, f) + return NonlinearOperator(name, f) end -function add_user_defined_function(::GenericModel, ::Int; kwargs...) +function register_nonlinear_operator(::GenericModel, ::Int; kwargs...) return error( - "Unable to register user-defined function because no functions were " * - "provided. Expected 1 (if function only), 2 (if function and " * - "gradient), or 3 (if function, gradient, and hesssian provided)", + "Unable to register operator because no functions were provided. " * + "Expected 1 (if function only), 2 (if function and gradient), or 3 " * + "(if function, gradient, and hesssian provided)", ) end """ - @register(model, operator, dim, args...) + @register(model, operator, dim, f[, ∇f[, ∇²f]]) -Register a user-defined function in `model`, and create a new variable -[`UserDefinedFunction`](@ref) called `operator` in the current scope. +Register the nonlinear operator `operator` in `model`, and create a new +[`NonlinearOperator`](@ref) object called `operator` in the current scope. ## Example @@ -937,27 +949,27 @@ julia> ∇f(x::Float64) = 2 * x julia> ∇²f(x::Float64) = 2.0 ∇²f (generic function with 1 method) -julia> @register(model, udf_f, 1, f, ∇f, ∇²f) -UserDefinedFunction{typeof(f)}(:udf_f, f) +julia> @register(model, op_f, 1, f, ∇f, ∇²f) +NonlinearOperator(:op_f, f) -julia> @objective(model, Min, udf_f(x)) -udf_f(x) +julia> @objective(model, Min, op_f(x)) +op_f(x) -julia> udf_f(2.0) +julia> op_f(2.0) 4.0 -julia> model[:udf_f] -UserDefinedFunction{typeof(f)}(:udf_f, f) +julia> model[:op_f] +NonlinearOperator(:op_f, f) -julia> model[:udf_f](x) -udf_f(x) +julia> model[:op_f](x) +op_f(x) ``` ## Non-macro version This macro is provided as helpful syntax that matches the style of the rest of -the JuMP macros. However, you may also create user-defined functions outside the -macros using [`add_user_defined_function`](@ref). For example: +the JuMP macros. However, you may also register operators outside the macro +using [`register_nonlinear_operator`](@ref). For example: ```jldoctest julia> model = Model(); @@ -965,8 +977,8 @@ julia> model = Model(); julia> f(x) = x^2 f (generic function with 1 method) -julia> @register(model, udf_f, 1, f) -UserDefinedFunction{typeof(f)}(:udf_f, f) +julia> @register(model, op_f, 1, f) +NonlinearOperator(:op_f, f) ``` is equivalent to ```jldoctest @@ -975,14 +987,14 @@ julia> model = Model(); julia> f(x) = x^2 f (generic function with 1 method) -julia> udf_f = model[:udf_f] = add_user_defined_function(model, 1, f; name = :udf_f) -UserDefinedFunction{typeof(f)}(:udf_f, f) +julia> op_f = model[:op_f] = register_nonlinear_operator(model, 1, f; name = :op_f) +NonlinearOperator(:op_f, f) ``` """ macro register(model, op, args...) 
    return _macro_assign_and_return(
        quote
-            add_user_defined_function(
+            register_nonlinear_operator(
                $(esc(model)),
                $(esc.(args)...);
                name = $(Meta.quot(op)),
            )
diff --git a/src/variables.jl b/src/variables.jl
index c66feaf0642..46e63ff2702 100644
--- a/src/variables.jl
+++ b/src/variables.jl
@@ -2043,9 +2043,9 @@ There are three common mistakes that lead to this.
    ```julia
    foo(x) = x $(sym) 1 ? 0 : 1 - x
    model = Model()
-    @register(model, udf_f, 1, foo)
+    @register(model, op_f, 1, foo)
    @variable(model, x)
-    @expression(model, udf_f(x))
+    @expression(model, op_f(x))
    ```
 
 3. You tried to create a logical nonlinear expression outside a macro, for
diff --git a/test/test_nlp_expr.jl b/test/test_nlp_expr.jl
index 2d661ea8637..aa4a2dddc0a 100644
--- a/test/test_nlp_expr.jl
+++ b/test/test_nlp_expr.jl
@@ -469,6 +469,7 @@ function test_register_univariate()
    model = Model()
    @variable(model, x)
    @register(model, f, 1, x -> x^2)
+    @test f isa NonlinearOperator
    @test isequal_canonical(@expression(model, f(x)), f(x))
    @test isequal_canonical(f(x), GenericNonlinearExpr(:f, Any[x]))
    attrs = MOI.get(model, MOI.ListOfModelAttributesSet())
@@ -555,7 +556,7 @@ function test_register_errors()
    model = Model()
    @test_throws(
        ErrorException(
-            "Unable to register user-defined function because no functions were " *
+            "Unable to register operator because no functions were " *
            "provided. Expected 1 (if function only), 2 (if function and " *
            "gradient), or 3 (if function, gradient, and hessian provided)",
        ),
@@ -564,7 +565,7 @@ function test_register_errors()
    f = x -> x^2
    @test_throws(
        ErrorException(
-            "Unable to register user-defined function foo: invalid number of " *
+            "Unable to register operator foo: invalid number of " *
            "functions provided. Got 4, but expected 1 (if function only), " *
            "2 (if function and gradient), or 3 (if function, gradient, and " *
            "hessian provided)",
@@ -610,7 +611,7 @@ function test_value_expression()
    y = QuadExpr(x + 1)
    @test value(f, my_foo(y)) ≈ (value(f, y) - 1)^2
    @test value(f, my_bar(2.2, x)) ≈ sqrt(2.2 - 1.1)
-    bad_udf = UserDefinedFunction(:bad_udf, f)
+    bad_udf = NonlinearOperator(:bad_udf, f)
    @test_throws(
        ErrorException(
            "Unable to evaluate nonlinear operator bad_udf because it is not " *

From dba484809816c4d0e70af620cf0402672629b147 Mon Sep 17 00:00:00 2001
From: odow
Date: Fri, 25 Aug 2023 13:43:17 +1200
Subject: [PATCH 12/23] Throw nice error on redefinition of constant

Updates

Fix docs

Only throw if name is exactly the same

Fix docs

Update

Fix tests
---
 docs/src/manual/nlp.md                        |  9 +--
 docs/src/manual/nonlinear.md                  | 72 ++++++++-----------
 .../tutorials/nonlinear/nested_problems.jl    |  4 +-
 src/nlp_expr.jl                               | 40 +++++++++--
 test/test_nlp_expr.jl                         | 20 +++---
 5 files changed, 81 insertions(+), 64 deletions(-)

diff --git a/docs/src/manual/nlp.md b/docs/src/manual/nlp.md
index 9374510825d..178a98fba01 100644
--- a/docs/src/manual/nlp.md
+++ b/docs/src/manual/nlp.md
@@ -8,10 +8,11 @@ DocTestFilters = [r"≤|<=", r"≥|>=", r" == | = ", r" ∈ | in ", r"MathOptInt
 
 # Nonlinear Modeling
 
-!!! info
-    This page describes the legacy nonlinear interface to JuMP. A new,
-    experimental nonlinear interface is in development. Find out more by reading
-    [Nonlinear Modeling](@ref new_nonlinear_interface).
+!!! warning
+    This page describes the legacy nonlinear interface to JuMP. It has a number
+    of quirks and limitations that prompted the development of a new nonlinear
+    interface. The new interface is documented at [Nonlinear Modeling](@ref new_nonlinear_interface).
+ This legacy interface will remain for all future `v1.X` releases of JuMP. JuMP has support for general smooth nonlinear (convex and nonconvex) optimization problems. JuMP is able to provide exact, sparse second-order diff --git a/docs/src/manual/nonlinear.md b/docs/src/manual/nonlinear.md index 0c87243b58d..83993d7fa93 100644 --- a/docs/src/manual/nonlinear.md +++ b/docs/src/manual/nonlinear.md @@ -9,16 +9,16 @@ DocTestFilters = [r"≤|<=", r"≥|>=", r" == | = ", r" ∈ | in ", r"MathOptInt # [Nonlinear Modeling](@id new_nonlinear_interface) !!! warning - This page describes an experimental nonlinear interface to JuMP. The API - described below is stable, and it will not break with future 1.X releases of - JuMP. However, solver support may be limited, and there may be gaps in - functionality compared with [Nonlinear Modeling](@ref). To report a bug, or - request a missing feature, please [open an issue](https://github.com/jump-dev/JuMP.jl/issues/new/choose). + This page describes a new nonlinear interface to JuMP. It replaces the + legacy `@NL` interface, which is documented at [Nonlinear Modeling](@ref). + The API described below is stable, and it will not break with future 1.X + releases of JuMP. However, solver support may be limited, and there may be + gaps in functionality compared with the legacy interface. To report a bug, + or request a missing feature, please [open an issue](https://github.com/jump-dev/JuMP.jl/issues/new/choose). -JuMP has support for general smooth nonlinear (convex and nonconvex) -optimization problems. JuMP is able to provide exact, sparse second-order -derivatives to solvers. This information can improve solver accuracy and -performance. +JuMP has support for nonlinear (convex and nonconvex) optimization problems. +JuMP is able to automatically provide exact, sparse second-order derivatives to +solvers. This information can improve solver accuracy and performance. ## Set a nonlinear objective @@ -339,9 +339,17 @@ The arguments to [`@register`](@ref) are: 4. A Julia method which computes the function. !!! warning - User-defined functions cannot be re-registered and will not update if you - modify the underlying Julia function. If you want to change a user-defined - function between solves, rebuild the model or use a different name. + User-defined functions cannot be re-registered or deleted. + +You can obtain a reference to the operator using the `model[:key]` syntax: + +```@repl +using JuMP +square(x) = x^2 +model = Model(); +@register(model, op_square, 1, square) +op_square_2 = model[:op_square] +``` ### Registered functions without macros @@ -362,37 +370,9 @@ model[:op_f] = op_f @objective(model, Min, op_f(x[1], op_square(x[2]))) ``` -This has two important consequences. +### Registering with the same name as an existing function -First, you cannot register a user-defined function with the same name as an -existing function. For example, a call to [`@register`](@ref) like: -```julia -julia> @register(model, square, 1, square) -``` -will error because it is equivalent to: -```julia -julia> square = register_nonlinear_operator(model, 1, square; name = :square) -ERROR: invalid redefinition of constant square -Stacktrace: -[...] -``` -and `square` already exists as a Julia function. 
- -Second, you can obtain a reference to the user-defined function using the -`model[:key]` syntax: - -```@repl -using JuMP -square(x) = x^2 -model = Model(); -@register(model, op_square, 1, square) -op_square_2 = model[:op_square] -``` - -### Invalid redefinition of constant - -A common error encountered is `invalid redefinition of constant`. This occurs -when the name of the user-defined function is the same as an existing function: +A common error encountered is the following: ```jldoctest nonlinear_invalid_redefinition julia> using JuMP @@ -402,9 +382,15 @@ julia> f(x) = x^2 f (generic function with 1 method) julia> @register(model, f, 1, f) -ERROR: invalid redefinition of constant f +ERROR: Unable to register the nonlinear operator `:f` with the same name as +an existing function. [...] ``` +This error occurs because `@register(model, f, 1, f)` is equivalent to: +```julia +julia> f = register_nonlinear_operator(model, 1, f; name = :f) +``` +but `f` already exists as a Julia function. If you evaluate the function without registering it, JuMP will trace the function using operator overloading: diff --git a/docs/src/tutorials/nonlinear/nested_problems.jl b/docs/src/tutorials/nonlinear/nested_problems.jl index cf20ac1b0a5..4cc7b8624e3 100644 --- a/docs/src/tutorials/nonlinear/nested_problems.jl +++ b/docs/src/tutorials/nonlinear/nested_problems.jl @@ -215,13 +215,13 @@ model = Model(Ipopt.Optimizer) cache = Cache(Float64[], NaN, Float64[]) @register( model, - op_V, + op_cached_f, 2, (x...) -> cached_f(cache, x...), (g, x...) -> cached_∇f(cache, g, x...), (H, x...) -> cached_∇²f(cache, H, x...), ) -@objective(model, Min, x[1]^2 + x[2]^2 + op_V(x[1], x[2])) +@objective(model, Min, x[1]^2 + x[2]^2 + op_cached_f(x[1], x[2])) optimize!(model) solution_summary(model) diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl index de80e868297..33a8149c20b 100644 --- a/src/nlp_expr.jl +++ b/src/nlp_expr.jl @@ -918,12 +918,35 @@ function register_nonlinear_operator( return NonlinearOperator(name, f) end -function register_nonlinear_operator(::GenericModel, ::Int; kwargs...) - return error( - "Unable to register operator because no functions were provided. " * - "Expected 1 (if function only), 2 (if function and gradient), or 3 " * - "(if function, gradient, and hesssian provided)", - ) +function _catch_redefinition_constant_error(op::Symbol, f::Function) + if op == Symbol(f) + error(""" + Unable to register the nonlinear operator `:$op` with the same name as + an existing function. + + For example, this code will error: + ```julia + model = Model() + f(x) = x^2 + @register(model, f, 1, f) + ``` + because it is equivalent to: + ```julia + model = Model() + f(x) = x^2 + f = register_nonlinear_operator(model, 1, f; name = :f) + ``` + + To fix, use a unique name, like `op_$op`: + ```julia + model = Model() + f(x) = x^2 + @register(model, op_f, 1, f) + @expression(model, op_f(x)) + ``` + """) + end + return end """ @@ -991,11 +1014,14 @@ julia> op_f = model[:op_f] = register_nonlinear_operator(model, 1, f; name = :op NonlinearOperator(:op_f, f) ``` """ -macro register(model, op, args...) +macro register(model, op, dim, f, args...) 
    return _macro_assign_and_return(
        quote
+            _catch_redefinition_constant_error($(Meta.quot(op)), $(esc(f)))
            register_nonlinear_operator(
                $(esc(model)),
+                $(esc(dim)),
+                $(esc(f)),
                $(esc.(args)...);
                name = $(Meta.quot(op)),
            )
diff --git a/test/test_nlp_expr.jl b/test/test_nlp_expr.jl
index aa4a2dddc0a..50e116c984d 100644
--- a/test/test_nlp_expr.jl
+++ b/test/test_nlp_expr.jl
@@ -554,14 +554,6 @@ end
 
 function test_register_errors()
     model = Model()
-    @test_throws(
-        ErrorException(
-            "Unable to register operator because no functions were " *
-            "provided. Expected 1 (if function only), 2 (if function and " *
-            "gradient), or 3 (if function, gradient, and hessian provided)",
-        ),
-        @register(model, foo, 2),
-    )
     f = x -> x^2
     @test_throws(
         ErrorException(
@@ -803,4 +795,16 @@ function test_operator_overload_complex_error()
     return
 end
 
+function test_redefinition_of_function()
+    model = Model()
+    f(x) = x^2
+    err = try
+        JuMP._catch_redefinition_constant_error(:f, f)
+    catch err
+        err
+    end
+    @test_throws(err, @register(model, f, 1, f))
+    return
+end
+
 end # module

From 1788385d3a83faeaf75728710ad1db39f7232ddf Mon Sep 17 00:00:00 2001
From: odow
Date: Sat, 26 Aug 2023 12:10:25 +1200
Subject: [PATCH 13/23] Improve test coverage
---
 src/nlp_expr.jl       |  4 ++--
 test/test_nlp_expr.jl | 15 ++++++++++++++-
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl
index 33a8149c20b..4e2254c37aa 100644
--- a/src/nlp_expr.jl
+++ b/src/nlp_expr.jl
@@ -178,10 +178,10 @@ function function_string(::MIME"text/latex", x::GenericNonlinearExpr)
         if arg isa GenericNonlinearExpr
             if arg.head in _PREFIX_OPERATORS && length(arg.args) > 1
                 if _needs_parentheses(arg.args[1])
-                    print(io, "\\left({")
+                    print(io, "{\\left({")
                 end
                 if _needs_parentheses(arg.args[end])
-                    push!(stack, "}\\right)")
+                    push!(stack, "}\\right)}")
                 end
                 for i in length(arg.args):-1:2
                     push!(stack, arg.args[i])
diff --git a/test/test_nlp_expr.jl b/test/test_nlp_expr.jl
index 50e116c984d..46df0189dea 100644
--- a/test/test_nlp_expr.jl
+++ b/test/test_nlp_expr.jl
@@ -139,6 +139,8 @@ function test_extension_latex(ModelType = Model, VariableRefType = VariableRef)
     @expression(model, g, ifelse(x > 0, sin(x), x + cos(x)^2))
     @test function_string(MIME("text/latex"), g) ==
           raw"\textsf{ifelse}\left({x} > {0}, {\textsf{sin}\left({x}\right)}, {x} + {\left({\textsf{cos}\left({x}\right)} ^ {2.0}\right)}\right)"
+    @test function_string(MIME("text/latex"), (x + 1) / (x + 1)) ==
+          raw"{\left({x + 1}\right)} / {\left({x + 1}\right)}"
     return
 end
@@ -469,6 +472,7 @@ function test_register_univariate()
     @variable(model, x)
     @register(model, f, 1, x -> x^2)
     @test f isa NonlinearOperator
+    @test sprint(show, f) == "NonlinearOperator(:f, $(f.func))"
     @test isequal_canonical(@expression(model, f(x)), f(x))
     @test isequal_canonical(f(x), GenericNonlinearExpr(:f, Any[x]))
     attrs = MOI.get(model, MOI.ListOfModelAttributesSet())
@@ -509,7 +512,7 @@ end
 
-function test_register_multivariate_()
+function test_register_multivariate()
     model = Model()
     @variable(model, x[1:2])
     f = (x...) -> sum(x .^ 2)
@@ -552,6 +555,16 @@ function test_register_multivariate_gradient_hessian()
     return
 end
 
+function test_register_multivariate_many_args()
+    model = Model()
+    @variable(model, x[1:10])
+    f = (x...) -> sum(x .^ 2)
+    @register(model, foo, 10, f)
+    @test isequal_canonical(foo(x...), GenericNonlinearExpr(:foo, Any[x...]))
+    @test foo((1:10)...)
== 385 + return +end + function test_register_errors() model = Model() f = x -> x^2 From e56c53a0e59e83808112feb61b8fc783d54a0371 Mon Sep 17 00:00:00 2001 From: odow Date: Sat, 26 Aug 2023 14:06:02 +1200 Subject: [PATCH 14/23] Fix LaTeX printing of NonlinearExpr --- docs/src/manual/nlp.md | 1 + docs/src/tutorials/nonlinear/complementarity.jl | 4 ++-- src/nlp_expr.jl | 10 ++++++---- test/test_nlp_expr.jl | 16 ++++++++++++---- 4 files changed, 21 insertions(+), 10 deletions(-) diff --git a/docs/src/manual/nlp.md b/docs/src/manual/nlp.md index 178a98fba01..bd9c2cda8ab 100644 --- a/docs/src/manual/nlp.md +++ b/docs/src/manual/nlp.md @@ -13,6 +13,7 @@ DocTestFilters = [r"≤|<=", r"≥|>=", r" == | = ", r" ∈ | in ", r"MathOptInt of quirks and limitations that prompted the development of a new nonlinear interface. The new interface is documented at [Nonlinear Modeling](@ref new_nonlinear_interface). This legacy interface will remain for all future `v1.X` releases of JuMP. + The two nonlinear interfaces cannot be combined. JuMP has support for general smooth nonlinear (convex and nonconvex) optimization problems. JuMP is able to provide exact, sparse second-order diff --git a/docs/src/tutorials/nonlinear/complementarity.jl b/docs/src/tutorials/nonlinear/complementarity.jl index e407757020e..a301aab933f 100644 --- a/docs/src/tutorials/nonlinear/complementarity.jl +++ b/docs/src/tutorials/nonlinear/complementarity.jl @@ -72,7 +72,7 @@ value.([w, x, y, z]) # ## Transportation -# This is example is a reformulation of the transportation problem from Chapter +# This example is a reformulation of the transportation problem from Chapter # 3.3 of Dantzig, G.B. (1963). _Linear Programming and Extensions_. Princeton # University Press, Princeton, New Jersey. It is based on the GAMS model # [`gamslib_transmcp`](https://www.gams.com/latest/gamslib_ml/libhtml/gamslib_transmcp.html). @@ -142,7 +142,7 @@ value(K) # ## Electricity consumption -# This is example is mixed complementarity formulation of example 3.3.1 from +# This example is a mixed complementarity formulation of example 3.3.1 from # D’Aertrycke, G., Ehrenmann, A., Ralph, D., & Smeers, Y. (2017). [Risk trading # in capacity equilibrium models](https://doi.org/10.17863/CAM.17552). 
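As a hedged aside for readers of the complementarity tutorial changes above: those models all follow the same JuMP pattern for mixed complementarity problems. A minimal scalar sketch, not part of the patch (`PATHSolver` is the MCP solver these tutorials use):

```julia
using JuMP
import PATHSolver

# Find x in [0, 10] such that F(x) = 2x - 2 is complementary to x.
# The solution is x = 1, where F(x) = 0.
model = Model(PATHSolver.Optimizer)
set_silent(model)
@variable(model, 0 <= x <= 10)
@constraint(model, complements(2x - 2, x))
optimize!(model)
value(x)  # ≈ 1.0
```

The bounds on `x` define the complementarity set, so no extra constraint is needed to state `x >= 0`.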
diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl index 4e2254c37aa..a1ecef4d0cd 100644 --- a/src/nlp_expr.jl +++ b/src/nlp_expr.jl @@ -177,20 +177,22 @@ function function_string(::MIME"text/latex", x::GenericNonlinearExpr) arg = pop!(stack) if arg isa GenericNonlinearExpr if arg.head in _PREFIX_OPERATORS && length(arg.args) > 1 + print(io, "{") + push!(stack, "}") if _needs_parentheses(arg.args[1]) - print(io, "{\\left({") + print(io, "\\left(") end if _needs_parentheses(arg.args[end]) - push!(stack, "}\\right)}") + push!(stack, "\\right)") end for i in length(arg.args):-1:2 push!(stack, arg.args[i]) if _needs_parentheses(arg.args[i]) - push!(stack, "\\left({") + push!(stack, "\\left(") end push!(stack, "} $(arg.head) {") if _needs_parentheses(arg.args[i-1]) - push!(stack, "}\\right)") + push!(stack, "\\right)") end end push!(stack, arg.args[1]) diff --git a/test/test_nlp_expr.jl b/test/test_nlp_expr.jl index 46df0189dea..56a4fb1508a 100644 --- a/test/test_nlp_expr.jl +++ b/test/test_nlp_expr.jl @@ -135,12 +135,20 @@ function test_extension_latex(ModelType = Model, VariableRefType = VariableRef) @variable(model, x) @test function_string(MIME("text/latex"), sin(x)) == raw"\textsf{sin}\left({x}\right)" - @test function_string(MIME("text/plain"), sin(x)) == "sin(x)" + @test function_string(MIME("text/latex"), sin(x)^x) == + raw"{\textsf{sin}\left({x}\right)} ^ {x}" + @test function_string(MIME("text/latex"), sin(x)^(x + 1)) == + raw"{\textsf{sin}\left({x}\right)} ^ {\left(x + 1\right)}" + @test function_string(MIME("text/latex"), (x + 1)^x) == + raw"{\left(x + 1\right)} ^ {x}" + @test function_string(MIME("text/latex"), (x + 1)^(x + 1)) == + raw"{\left(x + 1\right)} ^ {\left(x + 1\right)}" + @test function_string(MIME("text/latex"), (x + 1)^sin(x)) == + raw"{\left(x + 1\right)} ^ {\textsf{sin}\left({x}\right)}" + @expression(model, g, ifelse(x > 0, sin(x), x + cos(x)^2)) @test function_string(MIME("text/latex"), g) == - raw"\textsf{ifelse}\left({x} > {0}, {\textsf{sin}\left({x}\right)}, {x} + {\left({\textsf{cos}\left({x}\right)} ^ {2.0}\right)}\right)" - @test function_string(MIME("text/latex"), (x + 1) / (x + 1)) == - raw"{\left({x + 1}\right)} / {\left({x + 1}\right)}" + raw"\textsf{ifelse}\left({{x} > {0}}, {\textsf{sin}\left({x}\right)}, {{x} + {\left({\textsf{cos}\left({x}\right)} ^ {2.0}\right)}}\right)" return end From 9c4b5c8a77c4c46c779d2fc93bbe5e1530d78176 Mon Sep 17 00:00:00 2001 From: odow Date: Mon, 28 Aug 2023 09:14:01 +1200 Subject: [PATCH 15/23] Rename op_ functions --- docs/src/manual/nonlinear.md | 22 +++++++++++----------- src/macros.jl | 20 ++++++++++---------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/docs/src/manual/nonlinear.md b/docs/src/manual/nonlinear.md index 83993d7fa93..6aad0f6d36a 100644 --- a/docs/src/manual/nonlinear.md +++ b/docs/src/manual/nonlinear.md @@ -266,7 +266,7 @@ julia> model = Model(); julia> @variable(model, x); julia> expr = op_ifelse( - op_or(op_less_than(x, -1), op_greater_equal(x, 1)), + op_or(op_less_than(x, -1), op_greater_than_or_equal_to(x, 1)), x^2, 0.0, ) @@ -275,16 +275,16 @@ ifelse((x < -1) || (x >= 1), x², 0.0) The available functions are: -| JuMP function | Julia function | -| :-------------------------------- | :------------- | -| [`op_ifelse`](@ref) | `ifelse` | -| [`op_and`](@ref) | `&&` | -| [`op_or`](@ref) | `\|\|` | -| [`op_greater_than`](@ref) | `>` | -| [`op_greater_equal`](@ref) | `>=` | -| [`op_less_than`](@ref) | `<` | -| [`op_less_equal`](@ref) | `<=` | -| [`op_equal_to`](@ref) | `==` | +| JuMP 
function | Julia function | +| :------------------------------------ | :------------- | +| [`op_ifelse`](@ref) | `ifelse` | +| [`op_and`](@ref) | `&&` | +| [`op_or`](@ref) | `\|\|` | +| [`op_greater_than`](@ref) | `>` | +| [`op_greater_than_or_equal_to`](@ref) | `>=` | +| [`op_less_than`](@ref) | `<` | +| [`op_less_than_or_equal_to`](@ref) | `<=` | +| [`op_equal_to`](@ref) | `==` | ### Fields diff --git a/src/macros.jl b/src/macros.jl index 69e558f6110..fb79cd212bf 100644 --- a/src/macros.jl +++ b/src/macros.jl @@ -598,7 +598,7 @@ x > 2 const op_greater_than = NonlinearOperator(:>, >) """ - op_less_equal(x, y) + op_less_than_or_equal_to(x, y) A function that falls back to `x <= y`, but when called with JuMP variables or expressions, returns a [`GenericNonlinearExpr`](@ref). @@ -610,17 +610,17 @@ julia> model = Model(); julia> @variable(model, x); -julia> op_less_equal(2, 2) +julia> op_less_than_or_equal_to(2, 2) true -julia> op_less_equal(x, 2) +julia> op_less_than_or_equal_to(x, 2) x <= 2 ``` """ -const op_less_equal = NonlinearOperator(:<=, <=) +const op_less_than_or_equal_to = NonlinearOperator(:<=, <=) """ - op_greater_equal(x, y) + op_greater_than_or_equal_to(x, y) A function that falls back to `x >= y`, but when called with JuMP variables or expressions, returns a [`GenericNonlinearExpr`](@ref). @@ -632,14 +632,14 @@ julia> model = Model(); julia> @variable(model, x); -julia> op_greater_equal(2, 2) +julia> op_greater_than_or_equal_to(2, 2) true -julia> op_greater_equal(x, 2) +julia> op_greater_than_or_equal_to(x, 2) x >= 2 ``` """ -const op_greater_equal = NonlinearOperator(:>=, >=) +const op_greater_than_or_equal_to = NonlinearOperator(:>=, >=) """ op_equal_to(x, y) @@ -672,9 +672,9 @@ function _rewrite_to_jump_logic(x) elseif x.args[1] == :> return Expr(:call, op_greater_than, x.args[2:end]...) elseif x.args[1] == :<= - return Expr(:call, op_less_equal, x.args[2:end]...) + return Expr(:call, op_less_than_or_equal_to, x.args[2:end]...) elseif x.args[1] == :>= - return Expr(:call, op_greater_equal, x.args[2:end]...) + return Expr(:call, op_greater_than_or_equal_to, x.args[2:end]...) elseif x.args[1] == :(==) return Expr(:call, op_equal_to, x.args[2:end]...) end From 1893d438c1f8a69342895c88ea354f9b47220723 Mon Sep 17 00:00:00 2001 From: odow Date: Tue, 29 Aug 2023 08:50:34 +1200 Subject: [PATCH 16/23] s/user-defined function/user-defined operator --- docs/src/manual/nlp.md | 2 +- docs/src/manual/nonlinear.md | 38 +++++++++---------- docs/src/should_i_use.md | 4 +- .../tutorials/nonlinear/nested_problems.jl | 11 +++--- .../tutorials/nonlinear/tips_and_tricks.jl | 6 +-- .../nonlinear/user_defined_hessians.jl | 4 +- src/variables.jl | 2 +- 7 files changed, 34 insertions(+), 33 deletions(-) diff --git a/docs/src/manual/nlp.md b/docs/src/manual/nlp.md index bd9c2cda8ab..fa977f00f64 100644 --- a/docs/src/manual/nlp.md +++ b/docs/src/manual/nlp.md @@ -264,7 +264,7 @@ julia> @NLconstraint(model, *((x / 2)...) <= 0.0) ERROR: Unsupported use of the splatting operator. JuMP supports splatting only symbols. For example, `x...` is ok, but `(x + 1)...`, `[x; y]...` and `g(f(y)...)` are not. ``` -## [User-defined Functions](@id old_user_defined_functions) +## User-defined Functions JuMP natively supports the set of univariate and multivariate functions recognized by the `MOI.Nonlinear` submodule. 
In addition to this list of functions, it is possible diff --git a/docs/src/manual/nonlinear.md b/docs/src/manual/nonlinear.md index 6aad0f6d36a..8204503084d 100644 --- a/docs/src/manual/nonlinear.md +++ b/docs/src/manual/nonlinear.md @@ -305,19 +305,19 @@ julia> expr.args x ``` -## User-defined functions +## User-defined operators -In addition to a standard list of univariate and multivariate functions +In addition to a standard list of univariate and multivariate operators recognized by the `MOI.Nonlinear` submodule, JuMP supports *user-defined* -Julia functions. +Julia operators. !!! warning - User-defined functions must return a scalar output. For a work-around, see - [User-defined functions with vector outputs](@ref). + User-defined operators must return a scalar output. For a work-around, see + [User-defined operators with vector outputs](@ref). -### Register a function +### Register an operator -Register a user-defined function using the [`@register`](@ref) macro: +Register a user-defined operator using the [`@register`](@ref) macro: ```@repl using JuMP @@ -333,13 +333,13 @@ model = Model(); The arguments to [`@register`](@ref) are: 1. The model in which the function is registered. - 2. A Julia symbol object which serves as the name of the user-defined function + 2. A Julia symbol object which serves as the name of the user-defined operator in JuMP expressions. This name must not be the same as that of the function. 3. The number of scalar input arguments that the function takes. 4. A Julia method which computes the function. !!! warning - User-defined functions cannot be re-registered or deleted. + User-defined opterators cannot be re-registered or deleted. You can obtain a reference to the operator using the `model[:key]` syntax: @@ -351,7 +351,7 @@ model = Model(); op_square_2 = model[:op_square] ``` -### Registered functions without macros +### Registered operators without macros The [`@register`](@ref) macro is syntactic sugar for the [`register_nonlinear_operator`](@ref) method. Thus, the non-macro version of the @@ -401,7 +401,7 @@ julia> f(x) x² ``` -To force JuMP to treat `f` as a user-defined function and not trace it, register +To force JuMP to treat `f` as a user-defined operator and not trace it, register the function using [`register_nonlinear_operator`](@ref) and define a new method which manually creates a [`NonlinearExpr`](@ref): ```jldoctest nonlinear_invalid_redefinition @@ -418,7 +418,7 @@ log(f(x)) ### Register gradients and Hessians By default, JuMP will use automatic differentiation to compute the gradient and -Hessian of user-defined functions. If your function is not amenable to +Hessian of user-defined operators. If your function is not amenable to automatic differentiation, or you can compute analytic derivatives, you may pass additional arguments to [`@register`](@ref) to compute the first- and second-derivatives. @@ -475,9 +475,9 @@ you may assume only that `H` supports `size(H)` and `setindex!`. Finally, the matrix is treated as dense, so the performance will be poor on functions with high-dimensional input. -### User-defined functions with vector inputs +### User-defined operators with vector inputs -User-defined functions which take vectors as input arguments (for example, +User-defined operators which take vectors as input arguments (for example, `f(x::Vector)`) are *not* supported. Instead, use Julia's splatting syntax to create a function with scalar arguments. 
For example, instead of: ```julia @@ -500,27 +500,27 @@ f(x::Vector) = sum(x[i]^i for i in 1:length(x)) ### Automatic differentiation -JuMP does not support black-box optimization, so all user-defined functions must +JuMP does not support black-box optimization, so all user-defined operators must provide derivatives in some form. Fortunately, JuMP supports automatic -differentiation of user-defined functions. +differentiation of user-defined operators. !!! info Automatic differentiation is *not* finite differencing. JuMP's automatically computed derivatives are not subject to approximation error. JuMP uses [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl) to -perform automatic differentiation of user-defined functions; see the ForwardDiff.jl +perform automatic differentiation of user-defined operators; see the ForwardDiff.jl [documentation](https://www.juliadiff.org/ForwardDiff.jl/v0.10.2/user/limitations.html) for a description of how to write a function suitable for automatic differentiation. -#### Common mistakes when writing a user-defined function +#### Common mistakes when writing a user-defined operator !!! warning Get an error like `No method matching Float64(::ForwardDiff.Dual)`? Read this section, and see the guidelines at [ForwardDiff.jl](https://www.juliadiff.org/ForwardDiff.jl/release-0.10/user/limitations.html). -The most common error is that your user-defined function is not generic with +The most common error is that your user-defined operator is not generic with respect to the number type, that is, don't assume that the input to the function is `Float64`. ```julia diff --git a/docs/src/should_i_use.md b/docs/src/should_i_use.md index aeb68d99c0e..af3171b0483 100644 --- a/docs/src/should_i_use.md +++ b/docs/src/should_i_use.md @@ -77,9 +77,9 @@ consider using other packages such as: ### Black-box, derivative free, or unconstrained optimization JuMP does support nonlinear programs with constraints and objectives containing -user-defined functions. However, the functions must be automatically +user-defined operators. However, the functions must be automatically differentiable, or need to provide explicit derivatives. (See -[User-defined functions](@ref) for more information.) +[User-defined operators](@ref) for more information.) If your function is a black-box that is non-differentiable (for example, it is the output of a simulation written in C++), JuMP is not the right tool for the diff --git a/docs/src/tutorials/nonlinear/nested_problems.jl b/docs/src/tutorials/nonlinear/nested_problems.jl index 4cc7b8624e3..f64f6ed8962 100644 --- a/docs/src/tutorials/nonlinear/nested_problems.jl +++ b/docs/src/tutorials/nonlinear/nested_problems.jl @@ -24,11 +24,12 @@ # where an *upper* problem uses the results from the optimization of a *lower* # subproblem. # -# To model the problem, we define a user-defined function to handle the decomposition -# of the lower problem inside the upper one. Finally, we show how to improve -# the performance by using a cache that avoids resolving the lower problem. +# To model the problem, we define a user-defined operator to handle the +# decomposition of the lower problem inside the upper one. Finally, we show how +# to improve the performance by using a cache that avoids resolving the lower +# problem. # -# For a simpler example of writing a user-defined function, +# For a simpler example of writing a user-defined operator, # see the [User-defined Hessians](@ref) tutorial. 
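To make the caching idea concrete before the diffs that follow, here is a simplified sketch. The names `LowerCache` and `cached_V` are illustrative, not from the patch, and the tutorial's `Cache` also stores the gradient of the lower problem:

```julia
# Re-solve the expensive lower problem only when the query point changes.
mutable struct LowerCache
    x::Vector{Float64}   # last point at which the lower problem was solved
    f::Float64           # cached optimal objective value
end

function cached_V(cache::LowerCache, solve_lower::Function, x::Float64...)
    if collect(x) != cache.x
        cache.x = collect(x)
        cache.f = solve_lower(x...)  # expensive inner optimization
    end
    return cache.f
end
```

A wrapper like `(x...) -> cached_f(cache, x...)` is then registered as the user-defined operator, as the nested_problems diff earlier in this series shows.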
# This tutorial uses the following packages: @@ -104,7 +105,7 @@ end # \end{array} # ``` -# This looks like a nonlinear optimization problem with a user-defined function +# This looks like a nonlinear optimization problem with a user-defined operator # ``V``! However, because ``V`` solves an optimization problem internally, we # can't use automatic differentiation to compute the first and second # derivatives. Instead, we can use JuMP's ability to pass callback functions diff --git a/docs/src/tutorials/nonlinear/tips_and_tricks.jl b/docs/src/tutorials/nonlinear/tips_and_tricks.jl index 48e7fe6d30a..750ea97ff0b 100644 --- a/docs/src/tutorials/nonlinear/tips_and_tricks.jl +++ b/docs/src/tutorials/nonlinear/tips_and_tricks.jl @@ -12,9 +12,9 @@ using JuMP import Ipopt import Test -# ## User-defined functions with vector outputs +# ## User-defined operators with vector outputs -# A common situation is to have a user-defined function like the following that +# A common situation is to have a user-defined operator like the following that # returns multiple outputs (we define `function_calls` to keep track of how # many times we call this method): @@ -31,7 +31,7 @@ end # term might be used in a constraint, and often they share work that is # expensive to evaluate. -# This is a problem for JuMP, because it requires user-defined functions to +# This is a problem for JuMP, because it requires user-defined operators to # return a single number. One option is to define two separate functions, the # first returning the first argument, and the second returning the second # argument. diff --git a/docs/src/tutorials/nonlinear/user_defined_hessians.jl b/docs/src/tutorials/nonlinear/user_defined_hessians.jl index 9f49f33dc9b..ba124e371c5 100644 --- a/docs/src/tutorials/nonlinear/user_defined_hessians.jl +++ b/docs/src/tutorials/nonlinear/user_defined_hessians.jl @@ -20,8 +20,8 @@ # # User-defined Hessians -# In this tutorial, we explain how to write a user-defined function (see -# [User-defined functions](@ref)) with a Hessian matrix explicitly provided by +# In this tutorial, we explain how to write a user-defined operator (see +# [User-defined operators](@ref)) with a Hessian matrix explicitly provided by # the user. # # For a more advanced example, see [Nested optimization problems](@ref). diff --git a/src/variables.jl b/src/variables.jl index 46e63ff2702..f5b4026a908 100644 --- a/src/variables.jl +++ b/src/variables.jl @@ -2039,7 +2039,7 @@ There are three common mistakes that lead to this. @variable(model, x) @expression(model, foo(x)) ``` - To fix, create a nonlinear model with a user-defined function: + To fix, create a nonlinear model with a user-defined operator: ```julia foo(x) = x $(sym) 1 ? 0 : 1 - x model = Model() From 1d38dd6d19af023eab71e890fcd42518aa09c7f5 Mon Sep 17 00:00:00 2001 From: odow Date: Tue, 29 Aug 2023 08:57:36 +1200 Subject: [PATCH 17/23] Update docstrings of register macro --- src/nlp_expr.jl | 54 ++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 51 insertions(+), 3 deletions(-) diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl index a1ecef4d0cd..c14e647229e 100644 --- a/src/nlp_expr.jl +++ b/src/nlp_expr.jl @@ -862,13 +862,33 @@ end Register a new nonlinear operator with `dim` input arguments to `model` and associate it with the name `name`. -The function `f` evaluates the function and must return a scalar. +The function `f` evaluates the operator and must return a scalar. 
The optional function `∇f` evaluates the first derivative, and the optional function `∇²f` evaluates the second derivative. `∇²f` may be provided only if `∇f` is also provided. +## Univariate syntax + +If `dim == 1`, then the method signatures of each function must be: + + * `f(::T)::T where {T<:Real}` + * `∇f(::T)::T where {T<:Real}` + * `∇²f(::T)::T where {T<:Real}` + +## Multivariate syntax + +If `dim > 1`, then the method signatures of each function must be: + + * `f(x::T...)::T where {T<:Real}` + * `∇f(g::AbstractVector{T}, x::T...)::Nothing where {T<:Real}` + * `∇²f(H::AbstractMatrix{T}, x::T...)::Nothing where {T<:Real}` + +Where the gradient vector `g` and Hessian matrix `H` are filled in-place. For +the Hessian, you must fill in the non-zero upper-triangular entries only. +Setting an off-diagonal lower-triangular element may error. + ## Example ```jldoctest @@ -954,8 +974,36 @@ end """ @register(model, operator, dim, f[, ∇f[, ∇²f]]) -Register the nonlinear operator `operator` in `model`, and create a new -[`NonlinearOperator`](@ref) object called `operator` in the current scope. +Register the nonlinear operator `operator` in `model` with `dim` arguments, and +create a new [`NonlinearOperator`](@ref) object called `operator` in the current +scope. + +The function `f` evaluates the operator and must return a scalar. + +The optional function `∇f` evaluates the first derivative, and the optional +function `∇²f` evaluates the second derivative. + +`∇²f` may be provided only if `∇f` is also provided. + +## Univariate syntax + +If `dim == 1`, then the method signatures of each function must be: + + * `f(::T)::T where {T<:Real}` + * `∇f(::T)::T where {T<:Real}` + * `∇²f(::T)::T where {T<:Real}` + +## Multivariate syntax + +If `dim > 1`, then the method signatures of each function must be: + + * `f(x::T...)::T where {T<:Real}` + * `∇f(g::AbstractVector{T}, x::T...)::Nothing where {T<:Real}` + * `∇²f(H::AbstractMatrix{T}, x::T...)::Nothing where {T<:Real}` + +Where the gradient vector `g` and Hessian matrix `H` are filled in-place. For +the Hessian, you must fill in the non-zero upper-triangular entries only. +Setting an off-diagonal lower-triangular element may error. 
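As a sketch of the multivariate signatures just listed (this example is not part of the docstring; the Rosenbrock function also appears elsewhere in these docs), note that `g` and `H` are filled in-place, and only the diagonal and upper-triangular entries of `H` are set, per the convention described above:

```julia
f(x::T...) where {T<:Real} = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2

function ∇f(g::AbstractVector{T}, x::T...) where {T<:Real}
    g[1] = -2 * (1 - x[1]) - 400 * x[1] * (x[2] - x[1]^2)
    g[2] = 200 * (x[2] - x[1]^2)
    return
end

function ∇²f(H::AbstractMatrix{T}, x::T...) where {T<:Real}
    H[1, 1] = 2 - 400 * x[2] + 1200 * x[1]^2
    H[1, 2] = -400 * x[1]  # upper-triangular entry only; H is symmetric
    H[2, 2] = 200
    return
end
```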
## Example From a9f160eff8d185747b668d47cf8207a648887dd3 Mon Sep 17 00:00:00 2001 From: odow Date: Tue, 29 Aug 2023 09:00:04 +1200 Subject: [PATCH 18/23] Change to op_strictly_ --- docs/src/manual/nonlinear.md | 22 ++++++++++---------- src/macros.jl | 40 ++++++++++++++++++------------------ 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/docs/src/manual/nonlinear.md b/docs/src/manual/nonlinear.md index 8204503084d..94f38df08c9 100644 --- a/docs/src/manual/nonlinear.md +++ b/docs/src/manual/nonlinear.md @@ -266,7 +266,7 @@ julia> model = Model(); julia> @variable(model, x); julia> expr = op_ifelse( - op_or(op_less_than(x, -1), op_greater_than_or_equal_to(x, 1)), + op_or(op_strictly_less_than(x, -1), op_greater_than(x, 1)), x^2, 0.0, ) @@ -275,16 +275,16 @@ ifelse((x < -1) || (x >= 1), x², 0.0) The available functions are: -| JuMP function | Julia function | -| :------------------------------------ | :------------- | -| [`op_ifelse`](@ref) | `ifelse` | -| [`op_and`](@ref) | `&&` | -| [`op_or`](@ref) | `\|\|` | -| [`op_greater_than`](@ref) | `>` | -| [`op_greater_than_or_equal_to`](@ref) | `>=` | -| [`op_less_than`](@ref) | `<` | -| [`op_less_than_or_equal_to`](@ref) | `<=` | -| [`op_equal_to`](@ref) | `==` | +| JuMP function | Julia function | +| :--------------------------------- | :------------- | +| [`op_ifelse`](@ref) | `ifelse` | +| [`op_and`](@ref) | `&&` | +| [`op_or`](@ref) | `\|\|` | +| [`op_greater_than`](@ref) | `>=` | +| [`op_less_than`](@ref) | `<=` | +| [`op_equal_to`](@ref) | `==` | +| [`op_strictly_greater_than`](@ref) | `>` | +| [`op_strictly_less_than`](@ref) | `<` | ### Fields diff --git a/src/macros.jl b/src/macros.jl index fb79cd212bf..78e29458548 100644 --- a/src/macros.jl +++ b/src/macros.jl @@ -554,7 +554,7 @@ const op_or = NonlinearOperator(:||, |) # syntax and is not a regular Julia function, but the MOI operator is `:||`. """ - op_less_than(x, y) + op_strictly_less_than(x, y) A function that falls back to `x < y`, but when called with JuMP variables or expressions, returns a [`GenericNonlinearExpr`](@ref). @@ -566,17 +566,17 @@ julia> model = Model(); julia> @variable(model, x); -julia> op_less_than(1, 2) +julia> op_strictly_less_than(1, 2) true -julia> op_less_than(x, 2) +julia> op_strictly_less_than(x, 2) x < 2 ``` """ -const op_less_than = NonlinearOperator(:<, <) +const op_strictly_less_than = NonlinearOperator(:<, <) """ - op_greater_than(x, y) + op_strictly_greater_than(x, y) A function that falls back to `x > y`, but when called with JuMP variables or expressions, returns a [`GenericNonlinearExpr`](@ref). @@ -588,17 +588,17 @@ julia> model = Model(); julia> @variable(model, x); -julia> op_greater_than(1, 2) +julia> op_strictly_greater_than(1, 2) false -julia> op_greater_than(x, 2) +julia> op_strictly_greater_than(x, 2) x > 2 ``` """ -const op_greater_than = NonlinearOperator(:>, >) +const op_strictly_greater_than = NonlinearOperator(:>, >) """ - op_less_than_or_equal_to(x, y) + op_less_than(x, y) A function that falls back to `x <= y`, but when called with JuMP variables or expressions, returns a [`GenericNonlinearExpr`](@ref). 
@@ -610,17 +610,17 @@ julia> model = Model(); julia> @variable(model, x); -julia> op_less_than_or_equal_to(2, 2) +julia> op_less_than(2, 2) true -julia> op_less_than_or_equal_to(x, 2) +julia> op_less_than(x, 2) x <= 2 ``` """ -const op_less_than_or_equal_to = NonlinearOperator(:<=, <=) +const op_less_than = NonlinearOperator(:<=, <=) """ - op_greater_than_or_equal_to(x, y) + op_greater_than(x, y) A function that falls back to `x >= y`, but when called with JuMP variables or expressions, returns a [`GenericNonlinearExpr`](@ref). @@ -632,14 +632,14 @@ julia> model = Model(); julia> @variable(model, x); -julia> op_greater_than_or_equal_to(2, 2) +julia> op_greater_than(2, 2) true -julia> op_greater_than_or_equal_to(x, 2) +julia> op_greater_than(x, 2) x >= 2 ``` """ -const op_greater_than_or_equal_to = NonlinearOperator(:>=, >=) +const op_greater_than = NonlinearOperator(:>=, >=) """ op_equal_to(x, y) @@ -668,13 +668,13 @@ function _rewrite_to_jump_logic(x) op = if x.args[1] == :ifelse return Expr(:call, op_ifelse, x.args[2:end]...) elseif x.args[1] == :< - return Expr(:call, op_less_than, x.args[2:end]...) + return Expr(:call, op_strictly_less_than, x.args[2:end]...) elseif x.args[1] == :> - return Expr(:call, op_greater_than, x.args[2:end]...) + return Expr(:call, op_strictly_greater_than, x.args[2:end]...) elseif x.args[1] == :<= - return Expr(:call, op_less_than_or_equal_to, x.args[2:end]...) + return Expr(:call, op_less_than, x.args[2:end]...) elseif x.args[1] == :>= - return Expr(:call, op_greater_than_or_equal_to, x.args[2:end]...) + return Expr(:call, op_greater_than, x.args[2:end]...) elseif x.args[1] == :(==) return Expr(:call, op_equal_to, x.args[2:end]...) end From 4cfd5a488e1371b15222fc313390570f2b51f299 Mon Sep 17 00:00:00 2001 From: odow Date: Tue, 29 Aug 2023 09:51:16 +1200 Subject: [PATCH 19/23] Fix docs --- docs/src/manual/nlp.md | 2 +- docs/src/manual/nonlinear.md | 2 +- docs/src/should_i_use.md | 2 +- docs/src/tutorials/nonlinear/user_defined_hessians.jl | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/src/manual/nlp.md b/docs/src/manual/nlp.md index fa977f00f64..28029baac0d 100644 --- a/docs/src/manual/nlp.md +++ b/docs/src/manual/nlp.md @@ -279,7 +279,7 @@ the function is not available in the scope of the nonlinear expression. !!! warning User-defined functions must return a scalar output. For a work-around, see - [User-defined functions with vector outputs](@ref). + [User-defined operators with vector outputs](@ref). ### Automatic differentiation diff --git a/docs/src/manual/nonlinear.md b/docs/src/manual/nonlinear.md index 94f38df08c9..8078d1bc5a2 100644 --- a/docs/src/manual/nonlinear.md +++ b/docs/src/manual/nonlinear.md @@ -305,7 +305,7 @@ julia> expr.args x ``` -## User-defined operators +## [User-defined operators](@id jump_user_defined_operators) In addition to a standard list of univariate and multivariate operators recognized by the `MOI.Nonlinear` submodule, JuMP supports *user-defined* diff --git a/docs/src/should_i_use.md b/docs/src/should_i_use.md index af3171b0483..19d0f6397d0 100644 --- a/docs/src/should_i_use.md +++ b/docs/src/should_i_use.md @@ -79,7 +79,7 @@ consider using other packages such as: JuMP does support nonlinear programs with constraints and objectives containing user-defined operators. However, the functions must be automatically differentiable, or need to provide explicit derivatives. (See -[User-defined operators](@ref) for more information.) 
+[User-defined operators](@ref jump_user_defined_operators) for more information.) If your function is a black-box that is non-differentiable (for example, it is the output of a simulation written in C++), JuMP is not the right tool for the diff --git a/docs/src/tutorials/nonlinear/user_defined_hessians.jl b/docs/src/tutorials/nonlinear/user_defined_hessians.jl index ba124e371c5..d82d4181aff 100644 --- a/docs/src/tutorials/nonlinear/user_defined_hessians.jl +++ b/docs/src/tutorials/nonlinear/user_defined_hessians.jl @@ -21,8 +21,8 @@ # # User-defined Hessians # In this tutorial, we explain how to write a user-defined operator (see -# [User-defined operators](@ref)) with a Hessian matrix explicitly provided by -# the user. +# [User-defined operators](@ref jump_user_defined_operators)) with a Hessian +# matrix explicitly provided by the user. # # For a more advanced example, see [Nested optimization problems](@ref). From 2f9a78110d9b065baf44d748c687f1ad73c78161 Mon Sep 17 00:00:00 2001 From: Oscar Dowson Date: Tue, 29 Aug 2023 12:19:07 +1200 Subject: [PATCH 20/23] Update docs/src/manual/nonlinear.md --- docs/src/manual/nonlinear.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/manual/nonlinear.md b/docs/src/manual/nonlinear.md index 8078d1bc5a2..d808ac54456 100644 --- a/docs/src/manual/nonlinear.md +++ b/docs/src/manual/nonlinear.md @@ -339,7 +339,7 @@ The arguments to [`@register`](@ref) are: 4. A Julia method which computes the function. !!! warning - User-defined opterators cannot be re-registered or deleted. + User-defined operators cannot be re-registered or deleted. You can obtain a reference to the operator using the `model[:key]` syntax: From 2393c02eb659edb0b58925475c99e3583810584f Mon Sep 17 00:00:00 2001 From: odow Date: Wed, 30 Aug 2023 08:12:05 +1200 Subject: [PATCH 21/23] Rename register macro to operator --- docs/src/manual/nonlinear.md | 62 +++++++++---------- .../tutorials/applications/power_systems.jl | 2 +- .../tutorials/nonlinear/nested_problems.jl | 4 +- .../tutorials/nonlinear/tips_and_tricks.jl | 8 +-- .../nonlinear/user_defined_hessians.jl | 4 +- src/macros.jl | 20 +++--- src/nlp_expr.jl | 51 ++++++++------- src/variables.jl | 2 +- test/perf/NonlinearBenchmark.jl | 2 +- test/test_nlp_expr.jl | 32 +++++----- 10 files changed, 93 insertions(+), 94 deletions(-) diff --git a/docs/src/manual/nonlinear.md b/docs/src/manual/nonlinear.md index d808ac54456..ae4ace22b9d 100644 --- a/docs/src/manual/nonlinear.md +++ b/docs/src/manual/nonlinear.md @@ -266,7 +266,7 @@ julia> model = Model(); julia> @variable(model, x); julia> expr = op_ifelse( - op_or(op_strictly_less_than(x, -1), op_greater_than(x, 1)), + op_or(op_strictly_less_than(x, -1), op_greater_than_or_equal_to(x, 1)), x^2, 0.0, ) @@ -280,8 +280,8 @@ The available functions are: | [`op_ifelse`](@ref) | `ifelse` | | [`op_and`](@ref) | `&&` | | [`op_or`](@ref) | `\|\|` | -| [`op_greater_than`](@ref) | `>=` | -| [`op_less_than`](@ref) | `<=` | +| [`op_greater_than_or_equal_to`](@ref) | `>=` | +| [`op_less_than_or_equal_to`](@ref) | `<=` | | [`op_equal_to`](@ref) | `==` | | [`op_strictly_greater_than`](@ref) | `>` | | [`op_strictly_less_than`](@ref) | `<` | @@ -315,31 +315,31 @@ Julia operators. User-defined operators must return a scalar output. For a work-around, see [User-defined operators with vector outputs](@ref). 
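For reference, the work-around that the warning points to splits each output of a multi-output function into its own scalar operator. A hedged sketch, with illustrative function names that are not part of this patch:

```julia
using JuMP

simulator(x, y) = (x + y, x - y)        # a function with two outputs
simulator_1(x, y) = simulator(x, y)[1]  # scalar wrapper for output 1
simulator_2(x, y) = simulator(x, y)[2]  # scalar wrapper for output 2

model = Model()
@variable(model, x[1:2])
@operator(model, op_sim_1, 2, simulator_1)
@operator(model, op_sim_2, 2, simulator_2)
@objective(model, Max, op_sim_1(x[1], x[2]))
@constraint(model, op_sim_2(x[1], x[2]) <= 2)
```

If the two outputs share expensive work, memoize them, as in the tips_and_tricks tutorial touched later in this series.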
-### Register an operator +### Add an operator -Register a user-defined operator using the [`@register`](@ref) macro: +Add a user-defined operator using the [`@operator`](@ref) macro: ```@repl using JuMP square(x) = x^2 f(x, y) = (x - 1)^2 + (y - 2)^2 model = Model(); -@register(model, op_square, 1, square) -@register(model, op_f, 2, f) +@operator(model, op_square, 1, square) +@operator(model, op_f, 2, f) @variable(model, x[1:2]); @objective(model, Min, op_f(x[1], op_square(x[2]))) ``` -The arguments to [`@register`](@ref) are: +The arguments to [`@operator`](@ref) are: - 1. The model in which the function is registered. + 1. The model to which the operator is added. 2. A Julia symbol object which serves as the name of the user-defined operator in JuMP expressions. This name must not be the same as that of the function. 3. The number of scalar input arguments that the function takes. 4. A Julia method which computes the function. !!! warning - User-defined operators cannot be re-registered or deleted. + User-defined operators cannot be deleted. You can obtain a reference to the operator using the `model[:key]` syntax: @@ -347,14 +347,14 @@ You can obtain a reference to the operator using the `model[:key]` syntax: using JuMP square(x) = x^2 model = Model(); -@register(model, op_square, 1, square) +@operator(model, op_square, 1, square) op_square_2 = model[:op_square] ``` -### Registered operators without macros +### Add an operator without macros -The [`@register`](@ref) macro is syntactic sugar for the -[`register_nonlinear_operator`](@ref) method. Thus, the non-macro version of the +The [`@operator`](@ref) macro is syntactic sugar for the +[`add_nonlinear_operator`](@ref) method. Thus, the non-macro version of the preceding example is: ```@repl @@ -362,15 +362,15 @@ using JuMP square(x) = x^2 f(x, y) = (x - 1)^2 + (y - 2)^2 model = Model(); -op_square = register_nonlinear_operator(model, 1, square; name = :op_square) +op_square = add_nonlinear_operator(model, 1, square; name = :op_square) model[:op_square] = op_square -op_f = register_nonlinear_operator(model, 2, f; name = :op_f) +op_f = add_nonlinear_operator(model, 2, f; name = :op_f) model[:op_f] = op_f @variable(model, x[1:2]); @objective(model, Min, op_f(x[1], op_square(x[2]))) ``` -### Registering with the same name as an existing function +### Operators with the same name as an existing function A common error encountered is the following: ```jldoctest nonlinear_invalid_redefinition @@ -381,19 +381,19 @@ julia> model = Model(); julia> f(x) = x^2 f (generic function with 1 method) -julia> @register(model, f, 1, f) -ERROR: Unable to register the nonlinear operator `:f` with the same name as +julia> @operator(model, f, 1, f) +ERROR: Unable to add the nonlinear operator `:f` with the same name as an existing function. [...] ``` -This error occurs because `@register(model, f, 1, f)` is equivalent to: +This error occurs because `@operator(model, f, 1, f)` is equivalent to: ```julia -julia> f = register_nonlinear_operator(model, 1, f; name = :f) +julia> f = add_nonlinear_operator(model, 1, f; name = :f) ``` but `f` already exists as a Julia function. 
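The simplest fix, consistent with the `op_` naming convention used throughout these docs, is to give the operator a name that differs from the function. A minimal sketch:

```julia
using JuMP

model = Model()
f(x) = x^2
@operator(model, op_f, 1, f)  # `op_f` does not collide with the function `f`
@variable(model, x)
@objective(model, Min, op_f(x))
```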
-If you evaluate the function without registering it, JuMP will trace the -function using operator overloading: +If you evaluate the function without adding it as an operator, JuMP will trace +the function using operator overloading: ```jldoctest nonlinear_invalid_redefinition julia> @variable(model, x); @@ -401,11 +401,11 @@ julia> f(x) x² ``` -To force JuMP to treat `f` as a user-defined operator and not trace it, register -the function using [`register_nonlinear_operator`](@ref) and define a new method +To force JuMP to treat `f` as a user-defined operator and not trace it, add +the operator using [`add_nonlinear_operator`](@ref) and define a new method which manually creates a [`NonlinearExpr`](@ref): ```jldoctest nonlinear_invalid_redefinition -julia> _ = register_nonlinear_operator(model, 1, f; name = :f) +julia> _ = add_nonlinear_operator(model, 1, f; name = :f) NonlinearOperator(:f, f) julia> f(x::AbstractJuMPScalar) = NonlinearExpr(:f, Any[x]) @@ -415,12 +415,12 @@ julia> @expression(model, log(f(x))) log(f(x)) ``` -### Register gradients and Hessians +### Gradients and Hessians By default, JuMP will use automatic differentiation to compute the gradient and Hessian of user-defined operators. If your function is not amenable to automatic differentiation, or you can compute analytic derivatives, you may pass -additional arguments to [`@register`](@ref) to compute the first- and +additional arguments to [`@operator`](@ref) to compute the first- and second-derivatives. #### Univariate functions @@ -434,7 +434,7 @@ f(x) = x^2 ∇f(x) = 2x ∇²f(x) = 2 model = Model(); -@register(model, op_f, 1, f, ∇f, ∇²f) # Providing ∇²f is optional +@operator(model, op_f, 1, f, ∇f, ∇²f) # Providing ∇²f is optional @variable(model, x) @objective(model, Min, op_f(x)) ``` @@ -461,7 +461,7 @@ function ∇²f(H::AbstractMatrix{T}, x::T...) where {T} return end model = Model(); -@register(model, rosenbrock, 2, f, ∇f, ∇²f) # Providing ∇²f is optional +@operator(model, rosenbrock, 2, f, ∇f, ∇²f) # Providing ∇²f is optional @variable(model, x[1:2]) @objective(model, Min, rosenbrock(x[1], x[2])) ``` @@ -494,7 +494,7 @@ using JuMP model = Model(); @variable(model, x[1:5]) f(x::Vector) = sum(x[i]^i for i in 1:length(x)) -@register(model, op_f, 5, (x...) -> f(collect(x))) +@operator(model, op_f, 5, (x...) 
-> f(collect(x))) @objective(model, Min, op_f(x...)) ``` diff --git a/docs/src/tutorials/applications/power_systems.jl b/docs/src/tutorials/applications/power_systems.jl index ddf11753127..b5e9a5d289a 100644 --- a/docs/src/tutorials/applications/power_systems.jl +++ b/docs/src/tutorials/applications/power_systems.jl @@ -513,7 +513,7 @@ function solve_nonlinear_economic_dispatch( if silent set_silent(model) end - @register(model, op_tcf, 1, thermal_cost_function) + @operator(model, op_tcf, 1, thermal_cost_function) N = length(generators) @variable(model, generators[i].min <= g[i = 1:N] <= generators[i].max) @variable(model, 0 <= w <= scenario.wind) diff --git a/docs/src/tutorials/nonlinear/nested_problems.jl b/docs/src/tutorials/nonlinear/nested_problems.jl index f64f6ed8962..54865d05c13 100644 --- a/docs/src/tutorials/nonlinear/nested_problems.jl +++ b/docs/src/tutorials/nonlinear/nested_problems.jl @@ -142,7 +142,7 @@ end model = Model(Ipopt.Optimizer) @variable(model, x[1:2] >= 0) -@register(model, op_V, 2, V, ∇V, ∇²V) +@operator(model, op_V, 2, V, ∇V, ∇²V) @objective(model, Min, x[1]^2 + x[2]^2 + op_V(x[1], x[2])) optimize!(model) solution_summary(model) @@ -214,7 +214,7 @@ end model = Model(Ipopt.Optimizer) @variable(model, x[1:2] >= 0) cache = Cache(Float64[], NaN, Float64[]) -@register( +@operator( model, op_cached_f, 2, diff --git a/docs/src/tutorials/nonlinear/tips_and_tricks.jl b/docs/src/tutorials/nonlinear/tips_and_tricks.jl index 750ea97ff0b..26f96d0d97f 100644 --- a/docs/src/tutorials/nonlinear/tips_and_tricks.jl +++ b/docs/src/tutorials/nonlinear/tips_and_tricks.jl @@ -46,8 +46,8 @@ foo_2(x, y) = foo(x, y)[2] model = Model(Ipopt.Optimizer) set_silent(model) @variable(model, x[1:2] >= 0, start = 0.1) -@register(model, op_foo_1, 2, foo_1) -@register(model, op_foo_2, 2, foo_2) +@operator(model, op_foo_1, 2, foo_1) +@operator(model, op_foo_2, 2, foo_2) @objective(model, Max, op_foo_1(x[1], x[2])) @constraint(model, op_foo_2(x[1], x[2]) <= 2) function_calls = 0 @@ -114,8 +114,8 @@ println("function_calls = ", function_calls) model = Model(Ipopt.Optimizer) set_silent(model) @variable(model, x[1:2] >= 0, start = 0.1) -@register(model, op_foo_1, 2, memoized_foo[1]) -@register(model, op_foo_2, 2, memoized_foo[2]) +@operator(model, op_foo_1, 2, memoized_foo[1]) +@operator(model, op_foo_2, 2, memoized_foo[2]) @objective(model, Max, op_foo_1(x[1], x[2])) @constraint(model, op_foo_2(x[1], x[2]) <= 2) function_calls = 0 diff --git a/docs/src/tutorials/nonlinear/user_defined_hessians.jl b/docs/src/tutorials/nonlinear/user_defined_hessians.jl index d82d4181aff..b7f75e42d20 100644 --- a/docs/src/tutorials/nonlinear/user_defined_hessians.jl +++ b/docs/src/tutorials/nonlinear/user_defined_hessians.jl @@ -65,11 +65,11 @@ end # you may assume only that `H` supports `size(H)` and `setindex!`. 
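The `memoized_foo` helper that appears in the tips_and_tricks diff above is produced by a memoization function. A simplified sketch of the idea (the tutorial's version also specializes the cache on the element type so that ForwardDiff's `Dual` numbers do not evict the cached `Float64` results):

```julia
foo(x, y) = (x^2 + y^2, x + y)  # stand-in for the tutorial's expensive function

function memoize(foo::Function, n_outputs::Int)
    last_x, last_f = nothing, nothing
    function foo_i(i::Int, x...)
        if x !== last_x
            # Evaluate the expensive function once, and cache all outputs.
            last_x, last_f = x, foo(x...)
        end
        return last_f[i]
    end
    return [(x...) -> foo_i(i, x...) for i in 1:n_outputs]
end

memoized_foo = memoize(foo, 2)  # one single-output wrapper per output
```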
# Now that we have the function, its gradient, and its Hessian, we can construct
-# a JuMP model, register the function, and use it in a macro:
+# a JuMP model, add the operator, and use it in a macro:
 
 model = Model(Ipopt.Optimizer)
 @variable(model, x[1:2])
-@register(model, op_rosenbrock, 2, rosenbrock, ∇rosenbrock, ∇²rosenbrock)
+@operator(model, op_rosenbrock, 2, rosenbrock, ∇rosenbrock, ∇²rosenbrock)
 @objective(model, Min, op_rosenbrock(x[1], x[2]))
 optimize!(model)
 solution_summary(model; verbose = true)
diff --git a/src/macros.jl b/src/macros.jl
index 78e29458548..da97d5e5cda 100644
--- a/src/macros.jl
+++ b/src/macros.jl
@@ -598,7 +598,7 @@ x > 2
 const op_strictly_greater_than = NonlinearOperator(:>, >)
 
 """
-    op_less_than(x, y)
+    op_less_than_or_equal_to(x, y)
 
 A function that falls back to `x <= y`, but when called with JuMP variables or
 expressions, returns a [`GenericNonlinearExpr`](@ref).
@@ -610,17 +610,17 @@ julia> model = Model();
 
 julia> @variable(model, x);
 
-julia> op_less_than(2, 2)
+julia> op_less_than_or_equal_to(2, 2)
 true
 
-julia> op_less_than(x, 2)
+julia> op_less_than_or_equal_to(x, 2)
 x <= 2
 ```
 """
-const op_less_than = NonlinearOperator(:<=, <=)
+const op_less_than_or_equal_to = NonlinearOperator(:<=, <=)
 
 """
-    op_greater_than(x, y)
+    op_greater_than_or_equal_to(x, y)
 
 A function that falls back to `x >= y`, but when called with JuMP variables or
 expressions, returns a [`GenericNonlinearExpr`](@ref).
@@ -632,14 +632,14 @@ julia> model = Model();
 
 julia> @variable(model, x);
 
-julia> op_greater_than(2, 2)
+julia> op_greater_than_or_equal_to(2, 2)
 true
 
-julia> op_greater_than(x, 2)
+julia> op_greater_than_or_equal_to(x, 2)
 x >= 2
 ```
 """
-const op_greater_than = NonlinearOperator(:>=, >=)
+const op_greater_than_or_equal_to = NonlinearOperator(:>=, >=)
 
 """
     op_equal_to(x, y)
@@ -668,13 +668,13 @@ function _rewrite_to_jump_logic(x)
         elseif x.args[1] == :>
             return Expr(:call, op_strictly_greater_than, x.args[2:end]...)
         elseif x.args[1] == :<=
-            return Expr(:call, op_less_than, x.args[2:end]...)
+            return Expr(:call, op_less_than_or_equal_to, x.args[2:end]...)
         elseif x.args[1] == :>=
-            return Expr(:call, op_greater_than, x.args[2:end]...)
+            return Expr(:call, op_greater_than_or_equal_to, x.args[2:end]...)
         elseif x.args[1] == :(==)
             return Expr(:call, op_equal_to, x.args[2:end]...)
         end
diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl
index c14e647229e..f16e867c6bb 100644
--- a/src/nlp_expr.jl
+++ b/src/nlp_expr.jl
@@ -25,11 +25,10 @@ and the default list of supported multivariate operators is given by:
 
 * [`MOI.Nonlinear.DEFAULT_MULTIVARIATE_OPERATORS`](@ref)
 
-Additional operators can be registered by setting a
-[`MOI.UserDefinedFunction`](@ref) attribute.
+Additional operators can be added using [`@operator`](@ref).
 
 See the full list of operators supported by a [`MOI.ModelLike`](@ref) by
-querying [`MOI.ListOfSupportedNonlinearOperators`](@ref).
+querying the [`MOI.ListOfSupportedNonlinearOperators`](@ref) attribute.
 
 ## `args`
 
@@ -636,8 +635,8 @@ function _evaluate_user_defined_function(
     udf = MOI.get(model, MOI.UserDefinedFunction(op, nargs))
     if udf === nothing
         return error(
-            "Unable to evaluate nonlinear operator $op because it is not " *
-            "registered",
+            "Unable to evaluate nonlinear operator $op because it was " *
+            "not added as an operator.",
         )
     end
     args = [_evaluate_expr(registry, f, arg) for arg in expr.args]
@@ -775,8 +774,8 @@ When called with non-JuMP types, the struct returns the evaluation of
 `func(args...)`.
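A quick usage sketch of the calling behavior described above, using the renamed API from this patch: the operator evaluates `func` eagerly unless at least one argument is a JuMP scalar, in which case it builds an expression.

```julia
using JuMP

model = Model()
@variable(model, x)
g(a, b) = a * b
op_g = add_nonlinear_operator(model, 2, g; name = :op_g)
op_g(2.0, 3.0)  # 6.0: both arguments are numbers, so this calls g directly
op_g(x, 3.0)    # op_g(x, 3.0): a GenericNonlinearExpr
```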
Unless `head` is special-cased by the optimizer, the operator must have already
-been added to the model using [`register_nonlinear_operator`](@ref) or
-[`@register`](@ref).
+been added to the model using [`add_nonlinear_operator`](@ref) or
+[`@operator`](@ref).
 
 ## Example
 
@@ -795,7 +794,7 @@ julia> ∇f(x::Float64) = 2 * x
 
 julia> ∇²f(x::Float64) = 2.0
 ∇²f (generic function with 1 method)
 
-julia> @register(model, op_f, 1, f, ∇f, ∇²f)
+julia> @operator(model, op_f, 1, f, ∇f, ∇²f)
 NonlinearOperator(:op_f, f)
 
 julia> bar = NonlinearOperator(:op_f, f)
@@ -850,7 +849,7 @@ function (f::NonlinearOperator)(x, y, z...)
 end
 
 """
-    register_nonlinear_operator(
+    add_nonlinear_operator(
        model::Model,
        dim::Int,
        f::Function,
@@ -859,7 +858,7 @@ end
        [name::Symbol = Symbol(f),]
    )
 
-Register a new nonlinear operator with `dim` input arguments to `model` and
+Add a new nonlinear operator with `dim` input arguments to `model` and
 associate it with the name `name`.
 
 The function `f` evaluates the operator and must return a scalar.
@@ -906,7 +905,7 @@ julia> ∇f(x::Float64) = 2 * x
 
 julia> ∇²f(x::Float64) = 2.0
 ∇²f (generic function with 1 method)
 
-julia> op_f = register_nonlinear_operator(model, 1, f, ∇f, ∇²f)
+julia> op_f = add_nonlinear_operator(model, 1, f, ∇f, ∇²f)
 NonlinearOperator(:f, f)
 
 julia> @objective(model, Min, op_f(x))
@@ -916,7 +915,7 @@ julia> op_f(2.0)
 4.0
 ```
 """
-function register_nonlinear_operator(
+function add_nonlinear_operator(
     model::GenericModel,
     dim::Int,
     f::Function,
@@ -926,7 +925,7 @@ function register_nonlinear_operator(
     nargs = 1 + N
     if !(1 <= nargs <= 3)
         error(
-            "Unable to register operator $name: invalid number of functions " *
+            "Unable to add operator $name: invalid number of functions " *
            "provided. Got $nargs, but expected 1 (if function only), 2 (if " *
            "function and gradient), or 3 (if function, gradient, and " *
            "hessian provided)",
@@ -943,27 +942,27 @@ end
 function _catch_redefinition_constant_error(op::Symbol, f::Function)
     if op == Symbol(f)
         error("""
-        Unable to register the nonlinear operator `:$op` with the same name as
+        Unable to add the nonlinear operator `:$op` with the same name as
        an existing function.
 
        For example, this code will error:
        ```julia
        model = Model()
        f(x) = x^2
-        @register(model, f, 1, f)
+        @operator(model, f, 1, f)
        ```
        because it is equivalent to:
        ```julia
        model = Model()
        f(x) = x^2
-        f = register_nonlinear_operator(model, 1, f; name = :f)
+        f = add_nonlinear_operator(model, 1, f; name = :f)
        ```
 
        To fix, use a unique name, like `op_$op`:
        ```julia
        model = Model()
        f(x) = x^2
-        @register(model, op_f, 1, f)
+        @operator(model, op_f, 1, f)
        @expression(model, op_f(x))
        ```
        """)
@@ -972,9 +971,9 @@ function _catch_redefinition_constant_error(op::Symbol, f::Function)
 end
 
 """
-    @register(model, operator, dim, f[, ∇f[, ∇²f]])
+    @operator(model, operator, dim, f[, ∇f[, ∇²f]])
 
-Register the nonlinear operator `operator` in `model` with `dim` arguments, and
+Add the nonlinear operator `operator` in `model` with `dim` arguments, and
 create a new [`NonlinearOperator`](@ref) object called `operator` in the
 current scope.
 
@@ -1022,7 +1021,7 @@ julia> ∇f(x::Float64) = 2 * x
 
 julia> ∇²f(x::Float64) = 2.0
 ∇²f (generic function with 1 method)
 
-julia> @register(model, op_f, 1, f, ∇f, ∇²f)
+julia> @operator(model, op_f, 1, f, ∇f, ∇²f)
 NonlinearOperator(:op_f, f)
 
 julia> @objective(model, Min, op_f(x))
@@ -1041,8 +1040,8 @@ op_f(x)
 
 ## Non-macro version
 
 This macro is provided as helpful syntax that matches the style of the rest of
-the JuMP macros. 
However, you may also register operators outside the macro -using [`register_nonlinear_operator`](@ref). For example: +the JuMP macros. However, you may also add operators outside the macro +using [`add_nonlinear_operator`](@ref). For example: ```jldoctest julia> model = Model(); @@ -1050,7 +1049,7 @@ julia> model = Model(); julia> f(x) = x^2 f (generic function with 1 method) -julia> @register(model, op_f, 1, f) +julia> @operator(model, op_f, 1, f) NonlinearOperator(:op_f, f) ``` is equivalent to @@ -1060,15 +1059,15 @@ julia> model = Model(); julia> f(x) = x^2 f (generic function with 1 method) -julia> op_f = model[:op_f] = register_nonlinear_operator(model, 1, f; name = :op_f) +julia> op_f = model[:op_f] = add_nonlinear_operator(model, 1, f; name = :op_f) NonlinearOperator(:op_f, f) ``` """ -macro register(model, op, dim, f, args...) +macro operator(model, op, dim, f, args...) return _macro_assign_and_return( quote _catch_redefinition_constant_error($(Meta.quot(op)), $(esc(f))) - register_nonlinear_operator( + add_nonlinear_operator( $(esc(model)), $(esc(dim)), $(esc(f)), diff --git a/src/variables.jl b/src/variables.jl index f5b4026a908..f6cd0c0be2f 100644 --- a/src/variables.jl +++ b/src/variables.jl @@ -2043,7 +2043,7 @@ There are three common mistakes that lead to this. ```julia foo(x) = x $(sym) 1 ? 0 : 1 - x model = Model() - @register(model, op_f, 1, foo) + @operator(model, op_f, 1, foo) @variable(model, x) @expression(model, op_f(x)) ``` diff --git a/test/perf/NonlinearBenchmark.jl b/test/perf/NonlinearBenchmark.jl index 89484dd96ad..2ce3e66c49a 100644 --- a/test/perf/NonlinearBenchmark.jl +++ b/test/perf/NonlinearBenchmark.jl @@ -405,7 +405,7 @@ function perf_nlexpr_model_nested_problems() model = Model(Ipopt.Optimizer) set_silent(model) @variable(model, x[1:2] >= 0) - @register(model, f_V, 2, V, ∇V, ∇²V) + @operator(model, f_V, 2, V, ∇V, ∇²V) @objective(model, Min, x[1]^2 + x[2]^2 + f_V(x[1], x[2])) optimize!(model) solution_summary(model) diff --git a/test/test_nlp_expr.jl b/test/test_nlp_expr.jl index 56a4fb1508a..53b6704073e 100644 --- a/test/test_nlp_expr.jl +++ b/test/test_nlp_expr.jl @@ -478,7 +478,7 @@ end function test_register_univariate() model = Model() @variable(model, x) - @register(model, f, 1, x -> x^2) + @operator(model, f, 1, x -> x^2) @test f isa NonlinearOperator @test sprint(show, f) == "NonlinearOperator(:f, $(f.func))" @test isequal_canonical(@expression(model, f(x)), f(x)) @@ -491,9 +491,9 @@ end function test_register_eval_non_jump() model = Model() @variable(model, x) - @register(model, f, 1, x -> x^2) + @operator(model, f, 1, x -> x^2) @test f(2.0) == 4.0 - @register(model, g, 2, (x, y) -> x^2 - sin(y)) + @operator(model, g, 2, (x, y) -> x^2 - sin(y)) @test g(2.0, 3.0) == 4.0 - sin(3.0) return end @@ -501,7 +501,7 @@ end function test_register_univariate_gradient() model = Model() @variable(model, x) - @register(model, f, 1, x -> x^2, x -> 2 * x) + @operator(model, f, 1, x -> x^2, x -> 2 * x) @test isequal_canonical(@expression(model, f(x)), f(x)) @test isequal_canonical(f(x), GenericNonlinearExpr(:f, Any[x])) attrs = MOI.get(model, MOI.ListOfModelAttributesSet()) @@ -512,7 +512,7 @@ end function test_register_univariate_gradient_hessian() model = Model() @variable(model, x) - @register(model, f, 1, x -> x^2, x -> 2 * x, x -> 2.0) + @operator(model, f, 1, x -> x^2, x -> 2 * x, x -> 2.0) @test isequal_canonical(@expression(model, f(x)), f(x)) @test isequal_canonical(f(x), GenericNonlinearExpr(:f, Any[x])) attrs = MOI.get(model, 
MOI.ListOfModelAttributesSet())
@@ -524,7 +524,7 @@
 function test_register_multivariate()
     model = Model()
     @variable(model, x[1:2])
     f = (x...) -> sum(x .^ 2)
-    @register(model, foo, 2, f)
+    @operator(model, foo, 2, f)
     @test isequal_canonical(@expression(model, foo(x...)), foo(x...))
     @test isequal_canonical(foo(x...), GenericNonlinearExpr(:foo, Any[x...]))
     attrs = MOI.get(model, MOI.ListOfModelAttributesSet())
@@ -537,7 +537,7 @@ function test_register_multivariate_gradient()
     @variable(model, x[1:2])
     f = (x...) -> sum(x .^ 2)
     ∇f = (g, x...) -> (g .= 2 .* x)
-    @register(model, foo, 2, f, ∇f)
+    @operator(model, foo, 2, f, ∇f)
     @test isequal_canonical(@expression(model, foo(x...)), foo(x...))
     @test isequal_canonical(foo(x...), GenericNonlinearExpr(:foo, Any[x...]))
     attrs = MOI.get(model, MOI.ListOfModelAttributesSet())
@@ -555,7 +555,7 @@ function test_register_multivariate_gradient_hessian()
             H[i, i] = 2.0
         end
     end
-    @register(model, foo, 2, f, ∇f, ∇²f)
+    @operator(model, foo, 2, f, ∇f, ∇²f)
     @test isequal_canonical(@expression(model, foo(x...)), foo(x...))
     @test isequal_canonical(foo(x...), GenericNonlinearExpr(:foo, Any[x...]))
     attrs = MOI.get(model, MOI.ListOfModelAttributesSet())
@@ -567,7 +567,7 @@ function test_register_multivariate_many_args()
     model = Model()
     @variable(model, x[1:10])
     f = (x...) -> sum(x .^ 2)
-    @register(model, foo, 10, f)
+    @operator(model, foo, 10, f)
     @test isequal_canonical(foo(x...), GenericNonlinearExpr(:foo, Any[x...]))
     @test foo((1:10)...) == 385
     return
@@ -578,12 +578,12 @@ function test_register_errors()
     f = x -> x^2
     @test_throws(
         ErrorException(
-            "Unable to register operator foo: invalid number of " *
+            "Unable to add operator foo: invalid number of " *
             "functions provided. Got 4, but expected 1 (if function only), " *
             "2 (if function and gradient), or 3 (if function, gradient, and " *
             "hessian provided)",
        ),
-        @register(model, foo, 2, f, f, f, f),
+        @operator(model, foo, 2, f, f, f, f),
    )
    return
end
@@ -615,8 +615,8 @@ function test_value_expression()
     @test value(f, sin(x^2 + x + 1)) ≈ sin(1.1^2 + 1.1 + 1)
     foo(x) = (x - 1)^2
     bar(x, y) = sqrt(x - y)
-    @register(model, my_foo, 1, foo)
-    @register(model, my_bar, 2, bar)
+    @operator(model, my_foo, 1, foo)
+    @operator(model, my_bar, 2, bar)
     @test value(f, my_foo(x)) ≈ (1.1 - 1)^2
     @test value(f, my_foo(x + 1)) ≈ (1.1 + 1 - 1)^2
     @test value(f, my_foo(x^2 + 1)) ≈ (1.1^2 + 1 - 1)^2
@@ -627,8 +627,8 @@ function test_value_expression()
     bad_udf = NonlinearOperator(:bad_udf, f)
     @test_throws(
         ErrorException(
-            "Unable to evaluate nonlinear operator bad_udf because it is not " *
-            "registered",
+            "Unable to evaluate nonlinear operator bad_udf because it was " *
+            "not added as an operator.",
        ),
        value(f, bad_udf(x)),
    )
@@ -824,7 +824,7 @@ function test_redefinition_of_function()
     catch err
        err
    end
-    @test_throws(err, @register(model, f, 1, f))
+    @test_throws(err, @operator(model, f, 1, f))
    return
 end
 
From fa036afbbec1e85fe42b7ee502ba4575863a4e1c Mon Sep 17 00:00:00 2001
From: odow
Date: Wed, 30 Aug 2023 09:38:50 +1200
Subject: [PATCH 22/23] Minor updates

---
 docs/src/manual/nonlinear.md | 9 ++++-----
 src/nlp_expr.jl              | 8 ++++----
 2 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/docs/src/manual/nonlinear.md b/docs/src/manual/nonlinear.md
index ae4ace22b9d..df4f63348db 100644
--- a/docs/src/manual/nonlinear.md
+++ b/docs/src/manual/nonlinear.md
@@ -353,9 +353,8 @@ op_square_2 = model[:op_square]
 
 ### Add an operator without macros
 
-The [`@operator`](@ref) macro is syntactic sugar for the 
-[`add_nonlinear_operator`](@ref) method. Thus, the non-macro version of the -preceding example is: +The [`@operator`](@ref) macro is syntactic sugar for [`add_nonlinear_operator`](@ref). +Thus, the non-macro version of the preceding example is: ```@repl using JuMP @@ -534,7 +533,7 @@ your function which are `Float64`. ```julia function bad_f(x...) y = zeros(length(x)) # This constructs an array of `Float64`! - for i = 1:length(x) + for i in 1:length(x) y[i] = x[i]^i end return sum(y) @@ -542,7 +541,7 @@ end function good_f(x::T...) where {T<:Real} y = zeros(T, length(x)) # Construct an array of type `T` instead! - for i = 1:length(x) + for i in 1:length(x) y[i] = x[i]^i end return sum(y) diff --git a/src/nlp_expr.jl b/src/nlp_expr.jl index f16e867c6bb..6fe42fcb5ee 100644 --- a/src/nlp_expr.jl +++ b/src/nlp_expr.jl @@ -885,8 +885,8 @@ If `dim > 1`, then the method signatures of each function must be: * `∇²f(H::AbstractMatrix{T}, x::T...)::Nothing where {T<:Real}` Where the gradient vector `g` and Hessian matrix `H` are filled in-place. For -the Hessian, you must fill in the non-zero upper-triangular entries only. -Setting an off-diagonal lower-triangular element may error. +the Hessian, you must fill in the non-zero lower-triangular entries only. +Setting an off-diagonal upper-triangular element may error. ## Example @@ -1001,8 +1001,8 @@ If `dim > 1`, then the method signatures of each function must be: * `∇²f(H::AbstractMatrix{T}, x::T...)::Nothing where {T<:Real}` Where the gradient vector `g` and Hessian matrix `H` are filled in-place. For -the Hessian, you must fill in the non-zero upper-triangular entries only. -Setting an off-diagonal lower-triangular element may error. +the Hessian, you must fill in the non-zero lower-triangular entries only. +Setting an off-diagonal upper-triangular element may error. ## Example From 475aba4b7312db91c041320e271e4fa71165d95f Mon Sep 17 00:00:00 2001 From: Oscar Dowson Date: Wed, 30 Aug 2023 12:55:03 +1200 Subject: [PATCH 23/23] Update nonlinear.md --- docs/src/manual/nonlinear.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/src/manual/nonlinear.md b/docs/src/manual/nonlinear.md index df4f63348db..3c3f665108a 100644 --- a/docs/src/manual/nonlinear.md +++ b/docs/src/manual/nonlinear.md @@ -275,16 +275,16 @@ ifelse((x < -1) || (x >= 1), x², 0.0) The available functions are: -| JuMP function | Julia function | -| :--------------------------------- | :------------- | -| [`op_ifelse`](@ref) | `ifelse` | -| [`op_and`](@ref) | `&&` | -| [`op_or`](@ref) | `\|\|` | -| [`op_greater_than_or_equal_to`](@ref) | `>=` | -| [`op_less_than_or_equal_to`](@ref) | `<=` | -| [`op_equal_to`](@ref) | `==` | -| [`op_strictly_greater_than`](@ref) | `>` | -| [`op_strictly_less_than`](@ref) | `<` | +| JuMP function | Julia function | +| :------------------------------------ | :------------- | +| [`op_ifelse`](@ref) | `ifelse` | +| [`op_and`](@ref) | `&&` | +| [`op_or`](@ref) | `\|\|` | +| [`op_greater_than_or_equal_to`](@ref) | `>=` | +| [`op_less_than_or_equal_to`](@ref) | `<=` | +| [`op_equal_to`](@ref) | `==` | +| [`op_strictly_greater_than`](@ref) | `>` | +| [`op_strictly_less_than`](@ref) | `<` | ### Fields