diff --git a/docs/src/guide/expression.md b/docs/src/guide/expression.md index 8d4199ca..06a0efc7 100644 --- a/docs/src/guide/expression.md +++ b/docs/src/guide/expression.md @@ -4,33 +4,33 @@ DocTestFilters = [r"≤|<=", r" == | = ", r" ∈ | in ", r"MathOptInterface|MOI" ``` # [Expressions](@id expr_docs) -A guide for the defining and understanding the variable expressions -used in `InfiniteOpt`. See the [technical manual](@ref expr_manual) for more +A guide for defining and understanding the variable expressions +used in `InfiniteOpt`. See the [technical manual](@ref expr_manual) for more details. -!!! note - Nonlinear modeling is now handled in `InfiniteOpt` via `JuMP`'s new - nonlinear interface. See [Nonlinear Expressions](@ref nlp_guide) for - more information. +!!! note + Nonlinear modeling is now handled in `InfiniteOpt` via `JuMP`'s new + nonlinear interface. See [Nonlinear Expressions](@ref nlp_guide) for + more information. ## Overview -Expressions in `InfiniteOpt` (also called functions) refer to mathematical -statements involving variables and numbers. Thus, these comprise the -mathematical expressions used that are used in measures, objectives, and -constraints. Programmatically, `InfiniteOpt` simply extends `JuMP` expression -types and methods principally pertaining to affine and quadratic mathematical -expressions. A natively supported abstraction for general nonlinear expressions +Expressions in `InfiniteOpt` (also called functions) refer to mathematical +statements involving variables and numbers. Thus, these comprise the +mathematical expressions that are used in measures, objectives, and +constraints. Programmatically, `InfiniteOpt` simply extends `JuMP` expression +types and methods principally pertaining to affine and quadratic mathematical +expressions. A natively supported abstraction for general nonlinear expressions is planned for development since that of `JuMP` is not readily extendable. 
## [Parameter Functions](@id par_func_docs) -As described further below, InfiniteOpt.jl only supports affine and quadratic -expressions in its current rendition. However, there several use cases where we -might want to provide a more complex known function of infinite parameter(s) (e.g., -nonlinear setpoint tracking). Thus, we provide parameter function objects -that given a particular realization of infinite parameters will output a scalar -value. Note that this can be interpreted as an infinite variable that is -constrained to a particular known function. This is accomplished via -[`@parameter_function`](@ref) or [`parameter_function`](@ref) and is exemplified +As described further below, InfiniteOpt.jl only supports affine and quadratic +expressions in its current rendition. However, there are several use cases where we +might want to provide a more complex known function of infinite parameter(s) (e.g., +nonlinear setpoint tracking). Thus, we provide parameter function objects +that given a particular realization of infinite parameters will output a scalar +value. Note that this can be interpreted as an infinite variable that is +constrained to a particular known function. This is accomplished via +[`@parameter_function`](@ref) or [`parameter_function`](@ref) and is exemplified by defining a parameter function `f(t)` that uses `sin(t)`: ```jldoctest param_func julia> using InfiniteOpt; @@ -42,9 +42,9 @@ julia> @infinite_parameter(model, t in [0, 10]); julia> @parameter_function(model, f == sin(t)) f(t) ``` -Here we created a parameter function object, added it to `model`, and -then created a Julia variable `f` that serves as a `GeneralVariableRef` that points -to it. From here we can treat `f` as a normal infinite variable and use it with +Here we created a parameter function object, added it to `model`, and +then created a Julia variable `f` that serves as a `GeneralVariableRef` that points +to it. 
From here we can treat `f` as a normal infinite variable and use it with measures, derivatives, and constraints. For example, we can do the following: ```jldoctest param_func julia> @variable(model, y, Infinite(t)); @@ -58,7 +58,7 @@ julia> meas = integral(y - f, t) julia> @constraint(model, y - f <= 0) y(t) - f(t) ≤ 0, ∀ t ∈ [0, 10] ``` -We can also define parameter functions that depend on multiple infinite +We can also define parameter functions that depend on multiple infinite parameters even use an anonymous function if preferred: ```jldoctest param_func julia> @infinite_parameter(model, x[1:2] in [-1, 1]); @@ -66,9 +66,9 @@ julia> @infinite_parameter(model, x[1:2] in [-1, 1]); julia> @parameter_function(model, myname == (t, x) -> t + sum(x)) myname(t, x) ``` -In many applications, we may also desire to define an array of parameter functions -that each use a different realization of some parent function by varying some -additional positional/keyword arguments. We readily support this behavior since +In many applications, we may also desire to define an array of parameter functions +that each use a different realization of some parent function by varying some +additional positional/keyword arguments. 
We readily support this behavior since parameter functions can be defined with additional known arguments: ```jldoctest param_func julia> @parameter_function(model, pfunc_alt[i = 1:3] == t -> mysin(t, as[i], b = 0)) @@ -77,24 +77,24 @@ julia> @parameter_function(model, pfunc_alt[i = 1:3] == t -> mysin(t, as[i], b = pfunc_alt[2](t) pfunc_alt[3](t) ``` -The main recommended use case for [`parameter_function`](@ref) is that it is -amenable to define complex anonymous functions via a do-block which is useful +The main recommended use case for [`parameter_function`](@ref) is that it is +amenable to define complex anonymous functions via a do-block which is useful for applications like defining a time-varied setpoint: ```jldoctest param_func julia> setpoint = parameter_function(t, name = "setpoint") do t_supp if t_supp <= 5 return 2.0 - else + else return 10.2 end end setpoint(t) ``` -Please consult the following links for more information about defining parameter +Please consult the following links for more information about defining parameter functions: [`@parameter_function`](@ref) and [`parameter_function`](@ref). -Beyond this, there are a number of query and modification methods that can be -employed for parameter functions and these are detailed in the +Beyond this, there are a number of query and modification methods that can be +employed for parameter functions and these are detailed in the [technical manual](@ref par_func_manual) Section below. ## Variable Hierarchy @@ -129,14 +129,14 @@ An affine expression pertains to a mathematical function of the form: ```math f_a(x) = a_1x_1 + ... + a_nx_n + b ``` -where ``x \in \mathbb{R}^n`` denote variables, ``a \in \mathbb{R}^n`` denote -coefficients, and ``b \in \mathbb{R}`` denotes a constant value. 
Such -expressions, are prevalent in any problem than involves linear constraints +where ``x \in \mathbb{R}^n`` denote variables, ``a \in \mathbb{R}^n`` denote +coefficients, and ``b \in \mathbb{R}`` denotes a constant value. Such +expressions are prevalent in any problem that involves linear constraints and/or objectives. -In `InfiniteOpt`, affine expressions can be defined directly -using `Julia`'s arithmetic operators (i.e., `+`, `-`, `*`, etc.) or using -`@expression`. For example, let's define the expression +In `InfiniteOpt`, affine expressions can be defined directly +using `Julia`'s arithmetic operators (i.e., `+`, `-`, `*`, etc.) or using +`@expression`. For example, let's define the expression ``2y(t) + z - 3t`` noting that the following methods are equivalent: ```jldoctest affine; setup = :(using InfiniteOpt; model = InfiniteModel()) julia> @infinite_parameter(model, t in [0, 10]) @@ -160,15 +160,15 @@ julia> expr = @expression(model, 2y + z - 3t) julia> typeof(expr) GenericAffExpr{Float64, GeneralVariableRef} ``` -Notice that coefficients to variables can simply be put alongside variables -without having to use the `*` operator. Also, note that all of these expressions -are stored in a container referred to as a `GenericAffExpr` which is a `JuMP` +Notice that coefficients to variables can simply be put alongside variables +without having to use the `*` operator. Also, note that all of these expressions +are stored in a container referred to as a `GenericAffExpr` which is a `JuMP` object for storing affine expressions. !!! note - Where possible, it is preferable to use - [`@expression`](https://jump.dev/JuMP.jl/v1/api/JuMP/#JuMP.@expression) - for defining expressions as it is much more efficient than explicitly using + Where possible, it is preferable to use + [`@expression`](https://jump.dev/JuMP.jl/v1/api/JuMP/#JuMP.@expression) + for defining expressions as it is much more efficient than explicitly using the standard operators. 
`GenericAffExpr` objects contain 2 fields which are: @@ -185,10 +185,10 @@ OrderedCollections.OrderedDict{GeneralVariableRef, Float64} with 3 entries: julia> expr.constant 0.0 ``` -Notice that the ordered dictionary preserves the order in which the variables +Notice that the ordered dictionary preserves the order in which the variables appear in the expression. -More information can be found in the documentation for affine expressions in +More information can be found in the documentation for affine expressions in [`JuMP`](https://jump.dev/JuMP.jl/v1/api/JuMP/#JuMP.GenericAffExpr). ## Quadratic Expressions @@ -212,8 +212,8 @@ julia> expr = @expression(model, 2y^2 - z * y + 42t - 3) julia> typeof(expr) GenericQuadExpr{Float64, GeneralVariableRef} ``` -Again, notice that coefficients need not employ `*`. Also, the object used to -store the expression is a `GenericQuadExpr` which is a `JuMP` object used for +Again, notice that coefficients need not employ `*`. Also, the object used to +store the expression is a `GenericQuadExpr` which is a `JuMP` object used for storing quadratic expressions. `GenericQuadExpr` object contains 2 data fields which are: @@ -222,7 +222,7 @@ storing quadratic expressions. Here the `UnorderedPair` type is unique to `JuMP` and contains the fields: - `a::AbstractVariableRef` One variable in a quadratic pair - `b::AbstractVariableRef` The other variable in a quadratic pair. -Thus, this form can be used to store arbitrary quadratic expressions. For +Thus, this form can be used to store arbitrary quadratic expressions. For example, let's look at what these fields look like in the above example: ```jldoctest affine julia> expr.aff @@ -238,21 +238,21 @@ OrderedCollections.OrderedDict{UnorderedPair{GeneralVariableRef}, Float64} with ``` Notice again that the ordered dictionary preserves the order. 
-More information can be found in the documentation for quadratic expressions in +More information can be found in the documentation for quadratic expressions in [`JuMP`](https://jump.dev/JuMP.jl/v1/api/JuMP/#JuMP.GenericQuadExpr). ## [Nonlinear Expressions](@id nlp_guide) -In this section, we walk you through the ins and out of working +In this section, we walk you through the ins and outs of working general nonlinear (i.e., not affine or quadratic) expressions in `InfiniteOpt`. !!! info - Our previous `InfiniteOpt` specific nonlinear API as been removed in - favor of `JuMP`'s new and improved nonlinear interface. Thus, `InfiniteOpt` + Our previous `InfiniteOpt` specific nonlinear API has been removed in + favor of `JuMP`'s new and improved nonlinear interface. Thus, `InfiniteOpt` now strictly uses the same expression structures as `JuMP`. -### Basic Usage -We can define nonlinear expressions in similar manner to how affine/quadratic -expressions are made in `JuMP`. For instance, we can make an expression using +### Basic Usage +We can define nonlinear expressions in similar manner to how affine/quadratic +expressions are made in `JuMP`. For instance, we can make an expression using normal Julia code outside a macro: ```jldoctest nlp; setup = :(using InfiniteOpt; model = InfiniteModel()) julia> @infinite_parameter(model, t ∈ [0, 1]); @variable(model, y, Infinite(t)); julia> expr = exp(y^2.3) * y - 42 exp(y(t) ^ 2.3) * y(t) - 42 julia> typeof(expr) GenericNonlinearExpr{GeneralVariableRef} ``` -Thus, the nonlinear expression `expr` of type -[`GenericNonlinearExpr`](https://jump.dev/JuMP.jl/v1/api/JuMP/#GenericNonlinearExpr) -is created and can be readily incorporated into other expressions, the objective, -and/or constraints. 
For macro-based definition, we simply use the `@expression`, +Thus, the nonlinear expression `expr` of type +[`GenericNonlinearExpr`](https://jump.dev/JuMP.jl/v1/api/JuMP/#GenericNonlinearExpr) +is created and can be readily incorporated into other expressions, the objective, +and/or constraints. For macro-based definition, we simply use the `@expression`, `@objective`, and `@constraint` macros as normal: ```jldoctest nlp julia> @expression(model, expr, exp(y^2.3) * y - 42) @@ -284,15 +284,15 @@ constr : (((y(t) ^ y(t)) * sin(y(t))) + (y(t) ^ 3) + (y(t) ^ 4)) - 3.0 = 0, ∀ The legacy `@NLexpression`, `@NLobjective`, and `@NLconstraint` macros in `JuMP` are not supported by `InfiniteOpt`. -Natively, we support all the same nonlinear operators that `JuMP` -does. See [JuMP's documentation](https://jump.dev/JuMP.jl/v1/manual/nonlinear/#Supported-operators) +Natively, we support all the same nonlinear operators that `JuMP` +does. See [JuMP's documentation](https://jump.dev/JuMP.jl/v1/manual/nonlinear/#Supported-operators) for more information. -We can interrogate which nonlinear operators our model currently -supports by invoking [`all_nonlinear_operators`](@ref). Moreover, we can add -additional operators (see [Adding Nonlinear Operators](@ref) for more details). +We can interrogate which nonlinear operators our model currently +supports by invoking [`all_nonlinear_operators`](@ref). Moreover, we can add +additional operators (see [Adding Nonlinear Operators](@ref) for more details). -Finally, we highlight that nonlinear expressions in `InfiniteOpt` support the +Finally, we highlight that nonlinear expressions in `InfiniteOpt` support the same linear algebra operations as affine/quadratic expressions: ```jldoctest nlp julia> @variable(model, v[1:2]); @variable(model, Q[1:2, 1:2]); @@ -302,8 +302,8 @@ julia> @expression(model, v' * Q * v) ``` ### Function Tracing -In similar manner to `Symbolics.jl`, we support function tracing. 
This means -that we can create nonlinear modeling expression using Julia functions that +In similar manner to `Symbolics.jl`, we support function tracing. This means +that we can create nonlinear modeling expressions using Julia functions that satisfy certain criteria. For instance: ```jldoctest nlp julia> myfunc(x) = sin(x^3) / tan(2^x); @@ -311,19 +311,19 @@ julia> myfunc(x) = sin(x^3) / tan(2^x); julia> expr = myfunc(y) sin(y(t) ^ 3) / tan(2.0 ^ y(t)) ``` -However, there are certain limitations as to what internal code these functions +However, there are certain limitations as to what internal code these functions can contain. The following CANNOT be used: - loops (unless it only uses very simple operations) - if-statements (see workaround below) - unrecognized operators (if they cannot be traced). !!! tip - If a particular function is not amendable for tracing, try adding it - as a new nonlinear operator instead. See [Adding Nonlinear Operators](@ref) + If a particular function is not amenable for tracing, try adding it + as a new nonlinear operator instead. See [Adding Nonlinear Operators](@ref) for details. -We can readily work around the if-statement limitation using `op_ifelse` which -is a nonlinear operator version of `Base.ifelse` and follows the same syntax. +We can readily work around the if-statement limitation using `op_ifelse` which +is a nonlinear operator version of `Base.ifelse` and follows the same syntax. For example, the function: ```julia function mylogicfunc(x) @@ -344,20 +344,20 @@ mylogicfunc (generic function with 1 method) julia> mylogicfunc(y) ifelse(y(t) >= 0, y(t) ^ 3, 0) ``` -which is amendable for function tracing. Note that the basic logic operators -(e.g., `<=`) have special nonlinear operator analogues when used outside of a -macro. See [JuMP's documentation](https://jump.dev/JuMP.jl/v1/manual/nonlinear/#Limitations) +which is amenable for function tracing. 
Note that the basic logic operators +(e.g., `<=`) have special nonlinear operator analogues when used outside of a +macro. See [JuMP's documentation](https://jump.dev/JuMP.jl/v1/manual/nonlinear/#Limitations) for more details. ### Linear Algebra -As described above in the Basic Usage Section, we support basic linear algebra -operations with nonlinear expressions! This relies on our basic extensions of -[`MutableArithmetics`](https://github.com/jump-dev/MutableArithmetics.jl), but -admittedly this implementation is not perfect in terms of efficiency. - -!!! tip - Using linear algebra operations with nonlinear expression provides user - convenience, but is less efficient than using `sum`s. Thus, `sum` should be +As described above in the Basic Usage Section, we support basic linear algebra +operations with nonlinear expressions! This relies on our basic extensions of +[`MutableArithmetics`](https://github.com/jump-dev/MutableArithmetics.jl), but +admittedly this implementation is not perfect in terms of efficiency. + +!!! tip + Using linear algebra operations with nonlinear expression provides user + convenience, but is less efficient than using `sum`s. Thus, `sum` should be used instead when efficiency is critical. ```jldoctest nlp julia> v' * Q * v # convenient linear algebra syntax @@ -367,7 +367,7 @@ admittedly this implementation is not perfect in terms of efficiency. ((((v[1]*Q[1,1]) * v[1]) + ((v[2]*Q[2,1]) * v[1])) + ((v[1]*Q[1,2]) * v[2])) + ((v[2]*Q[2,2]) * v[2]) ``` -We can also set vectorized constraints using the `.==`, `.<=`, and `.>=` +We can also set vectorized constraints using the `.==`, `.<=`, and `.>=` operators: ```jldoctest nlp julia> @variable(model, W[1:2, 1:2]); @@ -380,10 +380,10 @@ julia> @constraint(model, W * Q * v .== 0) ### Adding Nonlinear Operators In a similar spirit to `JuMP` and `Symbolics`, we can add nonlinear operators -such that they can be directly incorporated into nonlinear expressions as atoms -(they will not be traced). 
This is done via the -[`@operator`](https://jump.dev/JuMP.jl/v1/api/JuMP/#@operator) macro. We can -register any operator that takes scalar arguments (which can accept inputs of +such that they can be directly incorporated into nonlinear expressions as atoms +(they will not be traced). This is done via the +[`@operator`](https://jump.dev/JuMP.jl/v1/api/JuMP/#@operator) macro. We can +register any operator that takes scalar arguments (which can accept inputs of type `Real`): ```jldoctest nlp julia> h(a, b) = a * b^2; # an overly simple example operator @@ -395,11 +395,11 @@ op_h(y(t), 42) ``` !!! tip - Where possible it is preferred to use function tracing instead. This improves - performance and can prevent unintentional errors. + Where possible it is preferred to use function tracing instead. This improves + performance and can prevent unintentional errors. See [Function Tracing](@ref) for more details. -To highlight the difference between function tracing and operator definition +To highlight the difference between function tracing and operator definition consider the following example: ```jldoctest nlp julia> f(a) = a^3; @@ -408,18 +408,18 @@ julia> f(y) # user-function gets traced y(t) ^ 3 julia> @operator(model, op_f, 1, f) # create nonlinear operator -NonlinearOperator(:op_f, f) +NonlinearOperator(f, :op_f) julia> op_f(y) # function is no longer traced op_f(y(t)) ``` -Thus, nonlinear operators are incorporated directly. This means that their -gradients and hessians will need to determined as well (typically occurs -behind the scenes via auto-differentiation with the selected optimizer model -backend). However, again please note that in this case tracing is preferred -since `f` can be traced. +Thus, nonlinear operators are incorporated directly. This means that their +gradients and hessians will need to be determined as well (typically occurs +behind the scenes via auto-differentiation with the selected optimizer model +backend). 
However, again please note that in this case tracing is preferred +since `f` can be traced. -Let's consider a more realistic example where the function is not amenable to +Let's consider a more realistic example where the function is not amenable to tracing: ```jldoctest nlp julia> function g(a) @@ -438,23 +438,23 @@ julia> @operator(model, op_g, 1, g); julia> op_g(y) op_g(y(t)) ``` -Notice this example is a little contrived still, highlighting that in most cases -we can avoid adding operators. However, one exception to this trend, are functions -from other packages that we might want to use. For example, perhaps we would -like to use the `eta` function from `SpecialFunctions.jl` which is not natively +Notice this example is a little contrived still, highlighting that in most cases +we can avoid adding operators. However, one exception to this trend, are functions +from other packages that we might want to use. For example, perhaps we would +like to use the `eta` function from `SpecialFunctions.jl` which is not natively supported: ```jldoctest nlp julia> using SpecialFunctions julia> @operator(model, op_eta, 1, eta) -NonlinearOperator(:op_eta, eta) +NonlinearOperator(eta, :op_eta) julia> op_eta(y) op_eta(y(t)) ``` -Now in some cases we might wish to specify the gradient and hessian of a -univariate operator to avoid the need for auto-differentiation. We +Now in some cases we might wish to specify the gradient and hessian of a +univariate operator to avoid the need for auto-differentiation. We can do this, simply by adding them as additional arguments in `@operator`: ```jldoctest nlp julia> my_squared(a) = a^2; gradient(a) = 2 * a; hessian(a) = 2; @@ -464,10 +464,10 @@ julia> @operator(model, op_square, 1, my_squared, gradient, hessian); julia> op_square(y) op_square(y(t)) ``` -Note the specification of the hessian is optional (it can separately be +Note the specification of the hessian is optional (it can separately be computed via auto-differentiation if need be). 
-For multivariate functions, we can specify the gradient following the same +For multivariate functions, we can specify the gradient following the same gradient function structure that `JuMP` uses: ```jldoctest nlp julia> w(a, b) = a * b^2; @@ -478,19 +478,19 @@ julia> function wg(v, a, b) return end; -julia> @operator(model, op_w, 2, w, wg) -NonlinearOperator(:op_w, w) +julia> @operator(model, op_w, 2, w, wg) +NonlinearOperator(w, :op_w) julia> op_w(42, y) op_w(42, y(t)) ``` -Note that the first argument of the gradient needs to accept an +Note that the first argument of the gradient needs to accept an `AbstractVector{Real}` that is then filled in place. !!! note - We do not currently support vector inputs or vector valued functions + We do not currently support vector inputs or vector valued functions directly, since typically `JuMP` optimizer model backends don't support them. ### More Details -For more details, please consult +For more details, please consult [JuMP's Documentation](https://jump.dev/JuMP.jl/v1/manual/nonlinear/). diff --git a/src/nlp.jl b/src/nlp.jl index df91db45..0055ead8 100644 --- a/src/nlp.jl +++ b/src/nlp.jl @@ -2,7 +2,7 @@ # USER OPERATORS ################################################################################ # Keep track of the predefined functions in MOI -const _NativeNLPOperators = append!(copy(MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS), +const _NativeNLPOperators = append!(copy(MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS), MOI.Nonlinear.DEFAULT_MULTIVARIATE_OPERATORS) append!(_NativeNLPOperators, (:&&, :||, :<=, :(==), :>=, :<, :>)) @@ -16,10 +16,10 @@ append!(_NativeNLPOperators, (:&&, :||, :<=, :(==), :>=, :<, :>)) [name::Symbol = Symbol(f)] ) -Extend `add_nonlinear_operator` for `InfiniteModel`s. +Extend `add_nonlinear_operator` for `InfiniteModel`s. Add a new nonlinear operator with `dim` input arguments to `model` and associate -it with the name `name`. 
Alternatively, [`@operator`](https://jump.dev/JuMP.jl/v1/api/JuMP/#@operator) +it with the name `name`. Alternatively, [`@operator`](https://jump.dev/JuMP.jl/v1/api/JuMP/#@operator) can be used for a more convenient syntax. The function `f` evaluates the operator. The optional function `∇f` evaluates @@ -32,7 +32,7 @@ julia> @variable(model, y); julia> g(x) = x^2; julia> new_op = add_nonlinear_operator(model, 1, g) -NonlinearOperator(:g, g) +NonlinearOperator(g, :g) julia> @expression(model, new_op(y)) g(y) @@ -54,13 +54,13 @@ function JuMP.add_nonlinear_operator( push!(model.operators, NLPOperator(name, dim, f, funcs...)) model.op_lookup[name] = (f, dim) # TODO should we set the optimizer model to be out of date? - return JuMP.NonlinearOperator(name, f) + return JuMP.NonlinearOperator(f, name) end """ - name_to_operator(model::InfiniteModel, name::Symbol)::Union{Function, Nothing} + name_to_operator(model::InfiniteModel, name::Symbol)::Union{Function, Nothing} -Return the nonlinear operator that corresponds to `name`. +Return the nonlinear operator that corresponds to `name`. Returns `nothing` if no such operator exists. !!! warning @@ -76,14 +76,14 @@ end Retrieve all the operators that are currently added to `model`. """ -function all_nonlinear_operators(model::InfiniteModel) +function all_nonlinear_operators(model::InfiniteModel) return append!(copy(_NativeNLPOperators), map(v -> Symbol(first(v)), values(model.op_lookup))) end """ user_defined_operators(model::InfiniteModel)::Vector{NLPOperator} -Return all the operators (and their associated information) that the user has +Return all the operators (and their associated information) that the user has added to `model`. Each is stored as a [`NLPOperator`](@ref). 
""" function added_nonlinear_operators(model::InfiniteModel) @@ -93,7 +93,7 @@ end ## Define helper function to add nonlinear operators to JuMP # No gradient or hessian function _add_op_data_to_jump( - model::JuMP.Model, + model::JuMP.Model, data::NLPOperator{F, Nothing, Nothing} ) where {F <: Function} JuMP.add_nonlinear_operator(model, data.dim, data.f, name = data.name) @@ -102,7 +102,7 @@ end # Only gradient information function _add_op_data_to_jump( - model::JuMP.Model, + model::JuMP.Model, data::NLPOperator{F, G, Nothing} ) where {F <: Function, G <: Function} JuMP.add_nonlinear_operator(model, data.dim, data.f, data.∇f, name = data.name) @@ -118,8 +118,8 @@ end """ add_operators_to_jump(opt_model::JuMP.Model, inf_model::InfiniteModel)::Nothing -Add the additional nonlinear operators in `inf_model` to a `JuMP` model `opt_model`. -This is intended as an internal method, but it is provided for developers that +Add the additional nonlinear operators in `inf_model` to a `JuMP` model `opt_model`. +This is intended as an internal method, but it is provided for developers that extend `InfiniteOpt` to use other optimizer models. """ function add_operators_to_jump(opt_model::JuMP.Model, inf_model::InfiniteModel)