diff --git a/dev/.documenter-siteinfo.json b/dev/.documenter-siteinfo.json
index 4c0bb8fe7d1..f8526560a7a 100644
--- a/dev/.documenter-siteinfo.json
+++ b/dev/.documenter-siteinfo.json
@@ -1 +1 @@
-{"documenter":{"julia_version":"1.11.1","generation_timestamp":"2024-11-18T05:20:16","documenter_version":"1.7.0"}}
\ No newline at end of file
+{"documenter":{"julia_version":"1.11.1","generation_timestamp":"2024-11-19T20:25:24","documenter_version":"1.7.0"}}
\ No newline at end of file
diff --git a/dev/JuMP.pdf b/dev/JuMP.pdf
index c7dfbecb2e3..b7ebbd8f8bb 100644
Binary files a/dev/JuMP.pdf and b/dev/JuMP.pdf differ
diff --git a/dev/api/JuMP.Containers/index.html b/dev/api/JuMP.Containers/index.html
index ac0f4aa00de..4cc1402f097 100644
--- a/dev/api/JuMP.Containers/index.html
+++ b/dev/api/JuMP.Containers/index.html
@@ -12,7 +12,7 @@
 3
 4

julia> array[:b, 3]
-4
source
DenseAxisArray{T}(undef, axes...) where T

Construct an uninitialized DenseAxisArray with element-type T indexed over the given axes.

Example

julia> array = Containers.DenseAxisArray{Float64}(undef, [:a, :b], 1:2);
+4
source
DenseAxisArray{T}(undef, axes...) where T

Construct an uninitialized DenseAxisArray with element-type T indexed over the given axes.

Example

julia> array = Containers.DenseAxisArray{Float64}(undef, [:a, :b], 1:2);
 
 julia> fill!(array, 1.0)
 2-dimensional DenseAxisArray{Float64,2,...} with index sets:
@@ -34,7 +34,7 @@
     Dimension 2, 1:2
 And data, a 2×2 Matrix{Float64}:
  1.0  5.0
- 1.0  1.0
source

SparseAxisArray

JuMP.Containers.SparseAxisArrayType
struct SparseAxisArray{T,N,K<:NTuple{N, Any}} <: AbstractArray{T,N}
+ 1.0  1.0
source

SparseAxisArray

JuMP.Containers.SparseAxisArrayType
struct SparseAxisArray{T,N,K<:NTuple{N, Any}} <: AbstractArray{T,N}
     data::OrderedCollections.OrderedDict{K,T}
 end

N-dimensional array with elements of type T where only a subset of the entries are defined. The entries with indices idx = (i1, i2, ..., iN) in keys(data) have value data[idx].

Note that, as opposed to SparseArrays.AbstractSparseArray, the missing entries are not assumed to be zero(T); they are simply not part of the array. This means that the result of map(f, sa::SparseAxisArray) or f.(sa::SparseAxisArray) has the same sparsity structure as sa, even if f(zero(T)) is not zero.

Example

julia> using OrderedCollections: OrderedDict
 
@@ -51,7 +51,7 @@
   [b, 3]  =  3.0
 
 julia> array[:b, 3]
-3.0
source

Containers.@container

JuMP.Containers.@containerMacro
@container([i=..., j=..., ...], expr[, container = :Auto])

Create a container with indices i, j, ... and values given by expr that may depend on the value of the indices.

@container(ref[i=..., j=..., ...], expr[, container = :Auto])

Same as above but the container is assigned to the variable of name ref.

The type of container can be controlled by the container keyword.

Note

When the index set is explicitly given as 1:n for any expression n, it is transformed to Base.OneTo(n) before being given to container.

Example

julia> Containers.@container([i = 1:3, j = 1:3], i + j)
+3.0
source

Containers.@container

JuMP.Containers.@containerMacro
@container([i=..., j=..., ...], expr[, container = :Auto])

Create a container with indices i, j, ... and values given by expr that may depend on the value of the indices.

@container(ref[i=..., j=..., ...], expr[, container = :Auto])

Same as above but the container is assigned to the variable of name ref.

The type of container can be controlled by the container keyword.

Note

When the index set is explicitly given as 1:n for any expression n, it is transformed to Base.OneTo(n) before being given to container.

Example

julia> Containers.@container([i = 1:3, j = 1:3], i + j)
 3×3 Matrix{Int64}:
  2  3  4
  3  4  5
@@ -86,7 +86,7 @@
   [1, 3]  =  4
   [2, 2]  =  4
   [2, 3]  =  5
-  [3, 3]  =  6
source

Containers.container

JuMP.Containers.containerFunction
container(f::Function, indices[[, ::Type{C} = AutoContainerType], names])

Create a container of type C with index names names, indices indices and values at given indices given by f.

If the method with names is not specialized on Type{C}, it falls back to calling container(f, indices, c) for backwards compatibility with containers not supporting index names.

Example

julia> Containers.container((i, j) -> i + j, Containers.vectorized_product(Base.OneTo(3), Base.OneTo(3)))
+  [3, 3]  =  6
source

Containers.container

JuMP.Containers.containerFunction
container(f::Function, indices[[, ::Type{C} = AutoContainerType], names])

Create a container of type C with index names names, indices indices and values at given indices given by f.

If the method with names is not specialized on Type{C}, it falls back to calling container(f, indices, c) for backwards compatibility with containers not supporting index names.

Example

julia> Containers.container((i, j) -> i + j, Containers.vectorized_product(Base.OneTo(3), Base.OneTo(3)))
 3×3 Matrix{Int64}:
  2  3  4
  3  4  5
@@ -115,7 +115,7 @@
   [1, 2]  =  3
   [1, 3]  =  4
   [2, 3]  =  5
-  [3, 3]  =  6
source

Containers.rowtable

JuMP.Containers.rowtableFunction
rowtable([f::Function=identity,] x; [header::Vector{Symbol} = Symbol[]])

Applies the function f to all elements of the variable container x, returning the result as a Vector of NamedTuples, where header is a vector containing the corresponding axis names.

If x is an N-dimensional array, there must be N+1 names, so that the last name corresponds to the result of f(x[i]).

If header is left empty, then the default header is [:x1, :x2, ..., :xN, :y].

Info

A Vector of NamedTuples implements the Tables.jl interface, and so the result can be used as input for any function that consumes a 'Tables.jl' compatible source.

Example

julia> model = Model();
+  [3, 3]  =  6
source

Containers.rowtable

JuMP.Containers.rowtableFunction
rowtable([f::Function=identity,] x; [header::Vector{Symbol} = Symbol[]])

Applies the function f to all elements of the variable container x, returning the result as a Vector of NamedTuples, where header is a vector containing the corresponding axis names.

If x is an N-dimensional array, there must be N+1 names, so that the last name corresponds to the result of f(x[i]).

If header is left empty, then the default header is [:x1, :x2, ..., :xN, :y].

Info

A Vector of NamedTuples implements the Tables.jl interface, and so the result can be used as input for any function that consumes a 'Tables.jl' compatible source.

Example

julia> model = Model();
 
 julia> @variable(model, x[i=1:2, j=i:2] >= 0, start = i+j);
 
@@ -129,7 +129,7 @@
 3-element Vector{@NamedTuple{x1::Int64, x2::Int64, y::VariableRef}}:
  (x1 = 1, x2 = 1, y = x[1,1])
  (x1 = 1, x2 = 2, y = x[1,2])
- (x1 = 2, x2 = 2, y = x[2,2])
source

Containers.default_container

JuMP.Containers.default_containerFunction
default_container(indices)

If indices is a NestedIterator, return a SparseAxisArray. Otherwise, indices should be a VectorizedProductIterator and the function returns Array if all iterators of the product are Base.OneTo and returns DenseAxisArray otherwise.

source

Containers.nested

JuMP.Containers.nestedFunction
nested(iterators...; condition = (args...) -> true)

Create a NestedIterator.

Example

julia> iterator = Containers.nested(
+ (x1 = 2, x2 = 2, y = x[2,2])
source

Containers.default_container

JuMP.Containers.default_containerFunction
default_container(indices)

If indices is a NestedIterator, return a SparseAxisArray. Otherwise, indices should be a VectorizedProductIterator and the function returns Array if all iterators of the product are Base.OneTo and returns DenseAxisArray otherwise.

source
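
For illustration, here is a minimal sketch of that dispatch, assuming JuMP is loaded (the comparisons with === are used so that the result is independent of how the returned types are printed):

julia> Containers.default_container(Containers.vectorized_product(Base.OneTo(2), Base.OneTo(3))) === Array
true

julia> Containers.default_container(Containers.vectorized_product(1:2, ["A", "B"])) === Containers.DenseAxisArray
true

julia> Containers.default_container(Containers.nested(() -> 1:3)) === Containers.SparseAxisArray
true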

Containers.nested

JuMP.Containers.nestedFunction
nested(iterators...; condition = (args...) -> true)

Create a NestedIterator.

Example

julia> iterator = Containers.nested(
            () -> 1:2,
            (i,) -> ["A", "B"];
            condition = (i, j) -> isodd(i) || j == "B",
@@ -139,21 +139,21 @@
 3-element Vector{Tuple{Int64, String}}:
  (1, "A")
  (1, "B")
- (2, "B")
source

Containers.vectorized_product

JuMP.Containers.vectorized_productFunction
vectorized_product(iterators...)

Create a VectorizedProductIterator.

Example

julia> iterator = Containers.vectorized_product(1:2, ["A", "B"]);
+ (2, "B")
source

Containers.vectorized_product

JuMP.Containers.vectorized_productFunction
vectorized_product(iterators...)

Create a VectorizedProductIterator.

Example

julia> iterator = Containers.vectorized_product(1:2, ["A", "B"]);
 
 julia> collect(iterator)
 2×2 Matrix{Tuple{Int64, String}}:
  (1, "A")  (1, "B")
- (2, "A")  (2, "B")
source

Containers.build_error_fn

JuMP.Containers.build_error_fnFunction
build_error_fn(macro_name, args, source)

Return a function that can be used in place of Base.error, but which additionally prints the macro from which it was called.

source

Containers.parse_macro_arguments

JuMP.Containers.parse_macro_argumentsFunction
parse_macro_arguments(
+ (2, "A")  (2, "B")
source

Containers.build_error_fn

JuMP.Containers.build_error_fnFunction
build_error_fn(macro_name, args, source)

Return a function that can be used in place of Base.error, but which additionally prints the macro from which it was called.

source
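
As an illustrative, hypothetical sketch (the macro name my_macro and the argument expressions are made up; the exact message format is an implementation detail):

julia> error_fn = Containers.build_error_fn(:my_macro, (:(x[i = 1:2]), :(i + 1)), LineNumberNode(1, :none));

julia> error_fn isa Function
true

Calling error_fn("message") throws an ErrorException whose text is prefixed with the offending macro call and its source location.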

Containers.parse_macro_arguments

JuMP.Containers.parse_macro_argumentsFunction
parse_macro_arguments(
     error_fn::Function,
     args;
     valid_kwargs::Union{Nothing,Vector{Symbol}} = nothing,
     num_positional_args::Union{Nothing,Int,UnitRange{Int}} = nothing,
-)

Returns a Tuple{Vector{Any},Dict{Symbol,Any}} containing the ordered positional arguments and a dictionary mapping the keyword arguments.

This specially handles the distinction of @foo(key = value) and @foo(; key = value) in macros.

An error is thrown if multiple keyword arguments are passed with the same key.

If valid_kwargs is a Vector{Symbol}, an error is thrown if a keyword is not in valid_kwargs.

If num_positional_args is not nothing, an error is thrown if the number of positional arguments is not in num_positional_args.

source

Containers.parse_ref_sets

JuMP.Containers.parse_ref_setsFunction
parse_ref_sets(
+)

Returns a Tuple{Vector{Any},Dict{Symbol,Any}} containing the ordered positional arguments and a dictionary mapping the keyword arguments.

This specially handles the distinction of @foo(key = value) and @foo(; key = value) in macros.

An error is thrown if multiple keyword arguments are passed with the same key.

If valid_kwargs is a Vector{Symbol}, an error is thrown if a keyword is not in valid_kwargs.

If num_positional_args is not nothing, an error is thrown if the number of positional arguments is not in num_positional_args.

source
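
A hedged sketch of the return value, using made-up argument expressions (output shown schematically):

julia> pos, kwargs = Containers.parse_macro_arguments(error, (:(x[i = 1:2]), :(container = Array)));

julia> pos
1-element Vector{Any}:
 :(x[i = 1:2])

julia> kwargs[:container]
:Array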

Containers.parse_ref_sets

JuMP.Containers.parse_ref_setsFunction
parse_ref_sets(
     error_fn::Function,
     expr;
     invalid_index_variables::Vector{Symbol} = Symbol[],
-)

Helper function for macros to construct container objects.

Warning

This function is for advanced users implementing JuMP extensions. See container_code for more details.

Arguments

  • error_fn: a function that takes a String and throws an error, potentially annotating the input string with extra information such as the macro from which it was thrown. Use error if you do not want a modified error message.
  • expr: an Expr that specifies the container, for example, :(x[i = 1:3, [:red, :blue], k = S; i + k <= 6])

Returns

  1. name: the name of the container, if given, otherwise nothing
  2. index_vars: a Vector{Any} of names for the index variables, for example, [:i, gensym(), :k]. These may also be expressions, like :((i, j)) from a call like :(x[(i, j) in S]).
  3. indices: an iterator over the indices, for example, Containers.NestedIterator

Example

See container_code for a worked example.

source

Containers.build_name_expr

JuMP.Containers.build_name_exprFunction
build_name_expr(
+)

Helper function for macros to construct container objects.

Warning

This function is for advanced users implementing JuMP extensions. See container_code for more details.

Arguments

  • error_fn: a function that takes a String and throws an error, potentially annotating the input string with extra information such as the macro from which it was thrown. Use error if you do not want a modified error message.
  • expr: an Expr that specifies the container, for example, :(x[i = 1:3, [:red, :blue], k = S; i + k <= 6])

Returns

  1. name: the name of the container, if given, otherwise nothing
  2. index_vars: a Vector{Any} of names for the index variables, for example, [:i, gensym(), :k]. These may also be expressions, like :((i, j)) from a call like :(x[(i, j) in S]).
  3. indices: an iterator over the indices, for example, Containers.NestedIterator

Example

See container_code for a worked example.

source

Containers.build_name_expr

JuMP.Containers.build_name_exprFunction
build_name_expr(
     name::Union{Symbol,Nothing},
     index_vars::Vector,
     kwargs::Dict{Symbol,Any},
@@ -164,12 +164,12 @@
 ""
 
 julia> Containers.build_name_expr(:y, [:i, :j], Dict{Symbol,Any}(:base_name => "y"))
-:(string("y", "[", string($(Expr(:escape, :i))), ",", string($(Expr(:escape, :j))), "]"))
source

Containers.add_additional_args

JuMP.Containers.add_additional_argsFunction
add_additional_args(
+:(string("y", "[", string($(Expr(:escape, :i))), ",", string($(Expr(:escape, :j))), "]"))
source

Containers.add_additional_args

JuMP.Containers.add_additional_argsFunction
add_additional_args(
     call::Expr,
     args::Vector,
     kwargs::Dict{Symbol,Any};
     kwarg_exclude::Vector{Symbol} = Symbol[],
-)

Add the positional arguments args to the function call expression call, escaping each argument expression.

This function is able to incorporate additional positional arguments to calls that already have keyword arguments.

source

Containers.container_code

JuMP.Containers.container_codeFunction
container_code(
+)

Add the positional arguments args to the function call expression call, escaping each argument expression.

This function is able to incorporate additional positional arguments to calls that already have keyword arguments.

source

Containers.container_code

JuMP.Containers.container_codeFunction
container_code(
     index_vars::Vector{Any},
     indices::Expr,
     code,
@@ -194,7 +194,7 @@
     Dimension 2, ["A", "B"]
 And data, a 2×2 Matrix{String}:
  "A"   "B"
- "AA"  "BB"
source

Containers.AutoContainerType

JuMP.Containers.AutoContainerTypeType
AutoContainerType

Pass AutoContainerType to container to let the container type be chosen based on the type of the indices using default_container.

source

Containers.NestedIterator

JuMP.Containers.NestedIteratorType
struct NestedIterator{T}
+ "AA"  "BB"
source

Containers.AutoContainerType

JuMP.Containers.AutoContainerTypeType
AutoContainerType

Pass AutoContainerType to container to let the container type be chosen based on the type of the indices using default_container.

source
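
For example, a minimal sketch that passes AutoContainerType explicitly; this is assumed to behave the same as omitting the type argument:

julia> Containers.container(
           (i, j) -> i + j,
           Containers.vectorized_product(Base.OneTo(2), Base.OneTo(2)),
           Containers.AutoContainerType,
       )
2×2 Matrix{Int64}:
 2  3
 3  4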

Containers.NestedIterator

JuMP.Containers.NestedIteratorType
struct NestedIterator{T}
     iterators::T # Tuple of functions
     condition::Function
 end

Iterators over the tuples that are produced by a nested for loop.

Construct a NestedIterator using nested.

Example

julia> iterators = (() -> 1:2, (i,) -> ["A", "B"]);
@@ -217,6 +217,6 @@
        end
 (1, "A")
 (1, "B")
-(2, "B")
source

Containers.VectorizedProductIterator

JuMP.Containers.VectorizedProductIteratorType
struct VectorizedProductIterator{T}
+(2, "B")
source

Containers.VectorizedProductIterator

JuMP.Containers.VectorizedProductIteratorType
struct VectorizedProductIterator{T}
     prod::Iterators.ProductIterator{T}
-end

A wrapper type for Iterators.ProductIterator that discards shape information and returns a Vector.

Construct a VectorizedProductIterator using vectorized_product.

source
+end

A wrapper type for Iterators.ProductIterator that discards shape information and returns a Vector.

Construct a VectorizedProductIterator using vectorized_product.

source
diff --git a/dev/api/JuMP/index.html b/dev/api/JuMP/index.html
index 211894e1fbb..337f101feec 100644
--- a/dev/api/JuMP/index.html
+++ b/dev/api/JuMP/index.html
@@ -15,7 +15,7 @@
 julia> @build_constraint(x .>= 0)
 2-element Vector{ScalarConstraint{AffExpr, MathOptInterface.GreaterThan{Float64}}}:
  ScalarConstraint{AffExpr, MathOptInterface.GreaterThan{Float64}}(x[1], MathOptInterface.GreaterThan{Float64}(-0.0))
- ScalarConstraint{AffExpr, MathOptInterface.GreaterThan{Float64}}(x[2], MathOptInterface.GreaterThan{Float64}(-0.0))source

@constraint

JuMP.@constraintMacro
@constraint(model, expr, args...; kwargs...)
+ ScalarConstraint{AffExpr, MathOptInterface.GreaterThan{Float64}}(x[2], MathOptInterface.GreaterThan{Float64}(-0.0))
source

@constraint

JuMP.@constraintMacro
@constraint(model, expr, args...; kwargs...)
 @constraint(model, [index_sets...], expr, args...; kwargs...)
 @constraint(model, name, expr, args...; kwargs...)
 @constraint(model, name[index_sets...], expr, args...; kwargs...)

Add a constraint described by the expression expr.

The name argument is optional. If index sets are passed, a container is built and the constraint may depend on the indices of the index sets.

The expression expr may be one of following forms:

  • func in set, constraining the function func to belong to the set set, which is either a MOI.AbstractSet or one of the JuMP shortcuts like SecondOrderCone or PSDCone

  • a <op> b, where <op> is one of ==, ≥, >=, ≤, <=

  • l <= f <= u or u >= f >= l, constraining the expression f to lie between l and u

  • f(x) ⟂ x, which defines a complementarity constraint

  • z --> {expr}, which defines an indicator constraint that activates when z is 1

  • !z --> {expr}, which defines an indicator constraint that activates when z is 0

  • z <--> {expr}, which defines a reified constraint

  • expr := rhs, which defines a Boolean equality constraint

Broadcasted comparison operators like .== are also supported for the case when the left- and right-hand sides of the comparison operator are arrays.

JuMP extensions may additionally provide support for constraint expressions which are not listed here.

Keyword arguments

  • base_name: sets the name prefix used to generate constraint names. It corresponds to the constraint name for scalar constraints, otherwise, the constraint names are set to base_name[...] for each index ....

  • container = :Auto: force the container type by passing container = Array, container = DenseAxisArray, container = SparseAxisArray, or any other container type which is supported by a JuMP extension.

  • set_string_name::Bool = true: control whether to set the MOI.ConstraintName attribute. Passing set_string_name = false can improve performance.

Other keyword arguments may be supported by JuMP extensions.

Example

julia> model = Model();
@@ -52,7 +52,7 @@
 z --> {x[1] ≥ 0}
 
 julia> @constraint(model, !z --> {2 * x[2] <= 3})
-!z --> {2 x[2] ≤ 3}
source

@constraints

JuMP.@constraintsMacro
@constraints(model, args...)

Adds groups of constraints at once, in the same fashion as the @constraint macro.

The model must be the first argument, and multiple constraints can be added on multiple lines wrapped in a begin ... end block.

The macro returns a tuple containing the constraints that were defined.

Example

julia> model = Model();
+!z --> {2 x[2] ≤ 3}
source

@constraints

JuMP.@constraintsMacro
@constraints(model, args...)

Adds groups of constraints at once, in the same fashion as the @constraint macro.

The model must be the first argument, and multiple constraints can be added on multiple lines wrapped in a begin ... end block.

The macro returns a tuple containing the constraints that were defined.

Example

julia> model = Model();
 
 julia> @variable(model, w);
 
@@ -75,7 +75,7 @@
  sum_to_one[2] : y + z[2] = 1
  sum_to_one[3] : y + z[3] = 1
  x ≥ 1
- -w + y ≤ 2
source

@expression

JuMP.@expressionMacro
@expression(model::GenericModel, expression)
+ -w + y ≤ 2
source

@expression

JuMP.@expressionMacro
@expression(model::GenericModel, expression)
 @expression(model::GenericModel, [index_sets...], expression)
 @expression(model::GenericModel, name, expression)
 @expression(model::GenericModel, name[index_sets...], expression)

Efficiently builds and returns an expression.

The name argument is optional. If index sets are passed, a container is built and the expression may depend on the indices of the index sets.

Keyword arguments

  • container = :Auto: force the container type by passing container = Array, container = DenseAxisArray, container = SparseAxisArray, or any other container type which is supported by a JuMP extension.

Example

julia> model = Model();
@@ -102,7 +102,7 @@
 3-element Vector{AffExpr}:
  x[1] + x[2] + x[3]
  2 x[1] + 2 x[2] + 2 x[3]
- 3 x[1] + 3 x[2] + 3 x[3]
source

@expressions

JuMP.@expressionsMacro
@expressions(model, args...)

Adds multiple expressions to model at once, in the same fashion as the @expression macro.

The model must be the first argument, and multiple expressions can be added on multiple lines wrapped in a begin ... end block.

The macro returns a tuple containing the expressions that were defined.

Example

julia> model = Model();
+ 3 x[1] + 3 x[2] + 3 x[3]
source

@expressions

JuMP.@expressionsMacro
@expressions(model, args...)

Adds multiple expressions to model at once, in the same fashion as the @expression macro.

The model must be the first argument, and multiple expressions can be added on multiple lines wrapped in a begin ... end block.

The macro returns a tuple containing the expressions that were defined.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -116,7 +116,7 @@
            my_expr, x^2 + y^2
            my_expr_1[i = 1:2], a[i] - z[i]
        end)
-(x² + y², AffExpr[-z[1] + 4, -z[2] + 5])
source

@force_nonlinear

JuMP.@force_nonlinearMacro
@force_nonlinear(expr)

Change the parsing of expr to construct GenericNonlinearExpr instead of GenericAffExpr or GenericQuadExpr.

This macro works by walking expr and substituting all calls to +, -, *, /, and ^ in favor of ones that construct GenericNonlinearExpr.

This macro will error if the resulting expression does not produce a GenericNonlinearExpr because, for example, it is used on an expression that does not use the basic arithmetic operators.

When to use this macro

In most cases, you should not use this macro.

Use this macro only if the intended output type is a GenericNonlinearExpr and the regular macro calls destroy problem structure, or in rare cases, if the regular macro calls introduce a large number of intermediate variables, for example, because they promote types to a common quadratic expression.

Example

Use-case one: preserve problem structure.

julia> model = Model();
+(x² + y², AffExpr[-z[1] + 4, -z[2] + 5])
source

@force_nonlinear

JuMP.@force_nonlinearMacro
@force_nonlinear(expr)

Change the parsing of expr to construct GenericNonlinearExpr instead of GenericAffExpr or GenericQuadExpr.

This macro works by walking expr and substituting all calls to +, -, *, /, and ^ in favor of ones that construct GenericNonlinearExpr.

This macro will error if the resulting expression does not produce a GenericNonlinearExpr because, for example, it is used on an expression that does not use the basic arithmetic operators.

When to use this macro

In most cases, you should not use this macro.

Use this macro only if the intended output type is a GenericNonlinearExpr and the regular macro calls destroy problem structure, or in rare cases, if the regular macro calls introduce a large number of intermediate variables, for example, because they promote types to a common quadratic expression.

Example

Use-case one: preserve problem structure.

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -144,7 +144,7 @@
 2640
 
 julia> @allocated @expression(model, @force_nonlinear(x * 2.0 * (1 + x) * x))
-672
source

@objective

JuMP.@objectiveMacro
@objective(model::GenericModel, sense, func)

Set the objective sense to sense and objective function to func.

The objective sense can be either Min, Max, MOI.MIN_SENSE, MOI.MAX_SENSE or MOI.FEASIBILITY_SENSE. In order to set the sense programmatically, that is, when sense is a variable whose value is the sense, one of the three MOI.OptimizationSense values must be used.

Example

To minimize the value of the variable x, do:

julia> model = Model();
+672
source

@objective

JuMP.@objectiveMacro
@objective(model::GenericModel, sense, func)

Set the objective sense to sense and objective function to func.

The objective sense can be either Min, Max, MOI.MIN_SENSE, MOI.MAX_SENSE or MOI.FEASIBILITY_SENSE. In order to set the sense programmatically, that is, when sense is a variable whose value is the sense, one of the three MOI.OptimizationSense values must be used.

Example

To minimize the value of the variable x, do:

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -165,7 +165,7 @@
 MIN_SENSE::OptimizationSense = 0
 
 julia> @objective(model, sense, x^2 - 2x + 1)
-x² - 2 x + 1
source

@operator

JuMP.@operatorMacro
@operator(model, operator, dim, f[, ∇f[, ∇²f]])

Add the nonlinear operator operator in model with dim arguments, and create a new NonlinearOperator object called operator in the current scope.

The function f evaluates the operator and must return a scalar.

The optional function ∇f evaluates the first derivative, and the optional function ∇²f evaluates the second derivative.

∇²f may be provided only if ∇f is also provided.

Univariate syntax

If dim == 1, then the method signatures of each function must be:

  • f(::T)::T where {T<:Real}
  • ∇f(::T)::T where {T<:Real}
  • ∇²f(::T)::T where {T<:Real}

Multivariate syntax

If dim > 1, then the method signatures of each function must be:

  • f(x::T...)::T where {T<:Real}
  • ∇f(g::AbstractVector{T}, x::T...)::Nothing where {T<:Real}
  • ∇²f(H::AbstractMatrix{T}, x::T...)::Nothing where {T<:Real}

The gradient vector g and the Hessian matrix H are filled in-place. For the Hessian, you must fill in the non-zero lower-triangular entries only. Setting an off-diagonal upper-triangular element may error.

Example

julia> model = Model();
+x² - 2 x + 1
source

@operator

JuMP.@operatorMacro
@operator(model, operator, dim, f[, ∇f[, ∇²f]])

Add the nonlinear operator operator in model with dim arguments, and create a new NonlinearOperator object called operator in the current scope.

The function f evaluates the operator and must return a scalar.

The optional function ∇f evaluates the first derivative, and the optional function ∇²f evaluates the second derivative.

∇²f may be provided only if ∇f is also provided.

Univariate syntax

If dim == 1, then the method signatures of each function must be:

  • f(::T)::T where {T<:Real}
  • ∇f(::T)::T where {T<:Real}
  • ∇²f(::T)::T where {T<:Real}

Multivariate syntax

If dim > 1, then the method signatures of each function must be:

  • f(x::T...)::T where {T<:Real}
  • ∇f(g::AbstractVector{T}, x::T...)::Nothing where {T<:Real}
  • ∇²f(H::AbstractMatrix{T}, x::T...)::Nothing where {T<:Real}

The gradient vector g and the Hessian matrix H are filled in-place. For the Hessian, you must fill in the non-zero lower-triangular entries only. Setting an off-diagonal upper-triangular element may error.

Example

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -204,7 +204,7 @@
 f (generic function with 1 method)
 
 julia> op_f = model[:op_f] = add_nonlinear_operator(model, 1, f; name = :op_f)
-NonlinearOperator(f, :op_f)
source

@variable

JuMP.@variableMacro
@variable(model, expr, args..., kw_args...)

Add a variable to the model model described by the expression expr, the positional arguments args and the keyword arguments kw_args.

Anonymous and named variables

expr must be one of the forms:

  • Omitted, like @variable(model), which creates an anonymous variable
  • A single symbol like @variable(model, x)
  • A container expression like @variable(model, x[i=1:3])
  • An anonymous container expression like @variable(model, [i=1:3])

Bounds

In addition, the expression can have bounds, such as:

  • @variable(model, x >= 0)
  • @variable(model, x <= 0)
  • @variable(model, x == 0)
  • @variable(model, 0 <= x <= 1)

and bounds can depend on the indices of the container expressions:

  • @variable(model, -i <= x[i=1:3] <= i)

Sets

You can explicitly specify the set to which the variable belongs:

  • @variable(model, x in MOI.Interval(0.0, 1.0))

For more information on this syntax, read Variables constrained on creation.

Positional arguments

The recognized positional arguments in args are the following:

  • Bin: restricts the variable to the MOI.ZeroOne set, that is, {0, 1}. For example, @variable(model, x, Bin). Note: you cannot use @variable(model, Bin), use the binary keyword instead.
  • Int: restricts the variable to the set of integers, that is, ..., -2, -1, 0, 1, 2, ... For example, @variable(model, x, Int). Note: you cannot use @variable(model, Int), use the integer keyword instead.
  • Symmetric: only available when creating a square matrix of variables, that is, when expr is of the form varname[1:n,1:n] or varname[i=1:n,j=1:n]; it creates a symmetric matrix of variables.
  • PSD: a restrictive extension of Symmetric which constrains a square matrix of variables to be both symmetric and positive semidefinite.

Keyword arguments

Four keyword arguments are useful in all cases:

  • base_name: sets the name prefix used to generate variable names. It corresponds to the variable name for a scalar variable; otherwise, the variable names are set to base_name[...] for each index ... of the axes.
  • start::Float64: specify the value passed to set_start_value for each variable
  • container: specify the container type. See Forcing the container type for more information.
  • set_string_name::Bool = true: control whether to set the MOI.VariableName attribute. Passing set_string_name = false can improve performance.

Other keyword arguments are needed to disambiguate situations with anonymous variables:

  • lower_bound::Float64: an alternative to x >= lb, sets the value of the variable lower bound.
  • upper_bound::Float64: an alternative to x <= ub, sets the value of the variable upper bound.
  • binary::Bool: an alternative to passing Bin, sets whether the variable is binary or not.
  • integer::Bool: an alternative to passing Int, sets whether the variable is integer or not.
  • set::MOI.AbstractSet: an alternative to using x in set
  • variable_type: used by JuMP extensions. See Extend @variable for more information.

Example

The following are equivalent ways of creating a variable x of name x with lower bound 0:

julia> model = Model();
+NonlinearOperator(f, :op_f)
source

@variable

JuMP.@variableMacro
@variable(model, expr, args..., kw_args...)

Add a variable to the model model described by the expression expr, the positional arguments args and the keyword arguments kw_args.

Anonymous and named variables

expr must be one of the forms:

  • Omitted, like @variable(model), which creates an anonymous variable
  • A single symbol like @variable(model, x)
  • A container expression like @variable(model, x[i=1:3])
  • An anonymous container expression like @variable(model, [i=1:3])

Bounds

In addition, the expression can have bounds, such as:

  • @variable(model, x >= 0)
  • @variable(model, x <= 0)
  • @variable(model, x == 0)
  • @variable(model, 0 <= x <= 1)

and bounds can depend on the indices of the container expressions:

  • @variable(model, -i <= x[i=1:3] <= i)

Sets

You can explicitly specify the set to which the variable belongs:

  • @variable(model, x in MOI.Interval(0.0, 1.0))

For more information on this syntax, read Variables constrained on creation.

Positional arguments

The recognized positional arguments in args are the following:

  • Bin: restricts the variable to the MOI.ZeroOne set, that is, {0, 1}. For example, @variable(model, x, Bin). Note: you cannot use @variable(model, Bin), use the binary keyword instead.
  • Int: restricts the variable to the set of integers, that is, ..., -2, -1, 0, 1, 2, ... For example, @variable(model, x, Int). Note: you cannot use @variable(model, Int), use the integer keyword instead.
  • Symmetric: only available when creating a square matrix of variables, that is, when expr is of the form varname[1:n,1:n] or varname[i=1:n,j=1:n]; it creates a symmetric matrix of variables.
  • PSD: a restrictive extension of Symmetric which constrains a square matrix of variables to be both symmetric and positive semidefinite.

Keyword arguments

Four keyword arguments are useful in all cases:

  • base_name: sets the name prefix used to generate variable names. It corresponds to the variable name for a scalar variable; otherwise, the variable names are set to base_name[...] for each index ... of the axes.
  • start::Float64: specify the value passed to set_start_value for each variable
  • container: specify the container type. See Forcing the container type for more information.
  • set_string_name::Bool = true: control whether to set the MOI.VariableName attribute. Passing set_string_name = false can improve performance.

Other keyword arguments are needed to disambiguate situations with anonymous variables:

  • lower_bound::Float64: an alternative to x >= lb, sets the value of the variable lower bound.
  • upper_bound::Float64: an alternative to x <= ub, sets the value of the variable upper bound.
  • binary::Bool: an alternative to passing Bin, sets whether the variable is binary or not.
  • integer::Bool: an alternative to passing Int, sets whether the variable is integer or not.
  • set::MOI.AbstractSet: an alternative to using x in set
  • variable_type: used by JuMP extensions. See Extend @variable for more information.

Example

The following are equivalent ways of creating a variable x of name x with lower bound 0:

julia> model = Model();
 
 julia> @variable(model, x >= 0)
 x
julia> model = Model();
@@ -233,14 +233,14 @@
 3-element Vector{VariableRef}:
  _[7]
  _[8]
- _[9]
source

@variables

JuMP.@variablesMacro
@variables(model, args...)

Adds multiple variables to model at once, in the same fashion as the @variable macro.

The model must be the first argument, and multiple variables can be added on multiple lines wrapped in a begin ... end block.

The macro returns a tuple containing the variables that were defined.

Example

julia> model = Model();
+ _[9]
source

@variables

JuMP.@variablesMacro
@variables(model, args...)

Adds multiple variables to model at once, in the same fashion as the @variable macro.

The model must be the first argument, and multiple variables can be added on multiple lines wrapped in a begin ... end block.

The macro returns a tuple containing the variables that were defined.

Example

julia> model = Model();
 
 julia> @variables(model, begin
            x
            y[i = 1:2] >= 0, (start = i)
            z, Bin, (start = 0, base_name = "Z")
        end)
-(x, VariableRef[y[1], y[2]], Z)
Note

Keyword arguments must be contained within parentheses (refer to the example above).

source

add_bridge

JuMP.add_bridgeFunction
add_bridge(
+(x, VariableRef[y[1], y[2]], Z)
Note

Keyword arguments must be contained within parentheses (refer to the example above).

source

add_bridge

JuMP.add_bridgeFunction
add_bridge(
     model::GenericModel{T},
     BT::Type{<:MOI.Bridges.AbstractBridge};
     coefficient_type::Type{S} = T,
@@ -252,11 +252,11 @@
            model,
            MOI.Bridges.Constraint.NumberConversionBridge;
            coefficient_type = Complex{Float64}
-       )
source

add_constraint

JuMP.add_constraintFunction
add_constraint(
+       )
source

add_constraint

JuMP.add_constraintFunction
add_constraint(
     model::GenericModel,
     con::AbstractConstraint,
     name::String= "",
-)

This method should only be implemented by developers creating JuMP extensions. It should never be called by users of JuMP.

source

add_nonlinear_operator

JuMP.add_nonlinear_operatorFunction
add_nonlinear_operator(
+)

This method should only be implemented by developers creating JuMP extensions. It should never be called by users of JuMP.

source

add_nonlinear_operator

JuMP.add_nonlinear_operatorFunction
add_nonlinear_operator(
     model::Model,
     dim::Int,
     f::Function,
@@ -284,7 +284,7 @@
 f(x)
 
 julia> op_f(2.0)
-4.0
source

add_to_expression!

JuMP.add_to_expression!Function
add_to_expression!(expression, terms...)

Updates expression in-place to expression + (*)(terms...).

This is typically much more efficient than expression += (*)(terms...) because it avoids the temporary allocation of the right-hand side term.

For example, add_to_expression!(expression, a, b) produces the same result as expression += a*b, and add_to_expression!(expression, a) produces the same result as expression += a.

When to implement

Only a few methods are defined, mostly for internal use, and only for the cases when:

  1. they can be implemented efficiently
  2. expression is capable of storing the result. For example, add_to_expression!(::AffExpr, ::GenericVariableRef, ::GenericVariableRef) is not defined because a GenericAffExpr cannot store the product of two variables.

Example

julia> model = Model();
+4.0
source

add_to_expression!

JuMP.add_to_expression!Function
add_to_expression!(expression, terms...)

Updates expression in-place to expression + (*)(terms...).

This is typically much more efficient than expression += (*)(terms...) because it avoids the temporary allocation of the right-hand side term.

For example, add_to_expression!(expression, a, b) produces the same result as expression += a*b, and add_to_expression!(expression, a) produces the same result as expression += a.

When to implement

Only a few methods are defined, mostly for internal use, and only for the cases when:

  1. they can be implemented efficiently
  2. expression is capable of storing the result. For example, add_to_expression!(::AffExpr, ::GenericVariableRef, ::GenericVariableRef) is not defined because a GenericAffExpr cannot store the product of two variables.

Example

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -316,7 +316,7 @@
 3 x[1] + 3 x[2]
 
 julia> ex2
-2 x[1] + 2 x[2]
source

add_to_function_constant

JuMP.add_to_function_constantFunction
add_to_function_constant(constraint::ConstraintRef, value)

Add value to the function constant term of constraint.

Note that for scalar constraints, JuMP will aggregate all constant terms onto the right-hand side of the constraint so instead of modifying the function, the set will be translated by -value. For example, given a constraint 2x <= 3, add_to_function_constant(c, 4) will modify it to 2x <= -1.

Example

For scalar constraints, the set is translated by -value:

julia> model = Model();
+2 x[1] + 2 x[2]
source

add_to_function_constant

JuMP.add_to_function_constantFunction
add_to_function_constant(constraint::ConstraintRef, value)

Add value to the function constant term of constraint.

Note that for scalar constraints, JuMP will aggregate all constant terms onto the right-hand side of the constraint so instead of modifying the function, the set will be translated by -value. For example, given a constraint 2x <= 3, add_to_function_constant(c, 4) will modify it to 2x <= -1.

Example

For scalar constraints, the set is translated by -value:

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -338,7 +338,7 @@
 julia> add_to_function_constant(con, [1, 2, 2])
 
 julia> con
-con : [x + y + 1, x + 2, y + 2] ∈ MathOptInterface.SecondOrderCone(3)
source

add_variable

JuMP.add_variableFunction
add_variable(m::GenericModel, v::AbstractVariable, name::String = "")

This method should only be implemented by developers creating JuMP extensions. It should never be called by users of JuMP.

source

all_constraints

JuMP.all_constraintsFunction
all_constraints(model::GenericModel, function_type, set_type)::Vector{<:ConstraintRef}

Return a list of all constraints currently in the model where the function has type function_type and the set has type set_type. The constraints are ordered by creation time.

See also list_of_constraint_types and num_constraints.

Example

julia> model = Model();
+con : [x + y + 1, x + 2, y + 2] ∈ MathOptInterface.SecondOrderCone(3)
source

add_variable

JuMP.add_variableFunction
add_variable(m::GenericModel, v::AbstractVariable, name::String = "")

This method should only be implemented by developers creating JuMP extensions. It should never be called by users of JuMP.

source

all_constraints

JuMP.all_constraintsFunction
all_constraints(model::GenericModel, function_type, set_type)::Vector{<:ConstraintRef}

Return a list of all constraints currently in the model where the function has type function_type and the set has type set_type. The constraints are ordered by creation time.

See also list_of_constraint_types and num_constraints.

Example

julia> model = Model();
 
 julia> @variable(model, x >= 0, Bin);
 
@@ -354,7 +354,7 @@
 
 julia> all_constraints(model, AffExpr, MOI.LessThan{Float64})
 1-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}}:
- 2 x ≤ 1
source
all_constraints(
+ 2 x ≤ 1
source
all_constraints(
     model::GenericModel;
     include_variable_in_set_constraints::Bool,
 )::Vector{ConstraintRef}

Return a list of all constraints in model.

If include_variable_in_set_constraints == true, then VariableRef constraints such as VariableRef-in-Integer are included. To return only the structural constraints (for example, the rows in the constraint matrix of a linear program), pass include_variable_in_set_constraints = false.

Example

julia> model = Model();
@@ -375,7 +375,7 @@
 julia> all_constraints(model; include_variable_in_set_constraints = false)
 2-element Vector{ConstraintRef}:
  2 x ≤ 1
- x ^ 2.0 - 1.0 ≤ 0

Performance considerations

Note that this function is type-unstable because it returns an abstractly typed vector. If performance is a problem, consider using list_of_constraint_types and a function barrier. See the Performance tips for extensions section of the documentation for more details.

source

all_variables

JuMP.all_variablesFunction
all_variables(model::GenericModel{T})::Vector{GenericVariableRef{T}} where {T}

Returns a list of all variables currently in the model. The variables are ordered by creation time.

Example

julia> model = Model();
+ x ^ 2.0 - 1.0 ≤ 0

Performance considerations

Note that this function is type-unstable because it returns an abstractly typed vector. If performance is a problem, consider using list_of_constraint_types and a function barrier. See the Performance tips for extensions section of the documentation for more details.

source
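
A hedged sketch of the function-barrier pattern mentioned above; count_constraints and inner are hypothetical helper names, not JuMP API:

julia> function inner(model, ::Type{F}, ::Type{S}) where {F,S}
           # F and S are type parameters here, so all_constraints returns a
           # concretely typed vector and this call is type-stable.
           return length(all_constraints(model, F, S))
       end;

julia> count_constraints(model) = sum(inner(model, F, S) for (F, S) in list_of_constraint_types(model); init = 0);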

all_variables

JuMP.all_variablesFunction
all_variables(model::GenericModel{T})::Vector{GenericVariableRef{T}} where {T}

Returns a list of all variables currently in the model. The variables are ordered by creation time.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -384,12 +384,12 @@
 julia> all_variables(model)
 2-element Vector{VariableRef}:
  x
- y
source

anonymous_name

JuMP.anonymous_nameFunction
anonymous_name(::MIME, x::AbstractVariableRef)

The name to use for an anonymous variable x when printing.

Example

julia> model = Model();
+ y
source

anonymous_name

JuMP.anonymous_nameFunction
anonymous_name(::MIME, x::AbstractVariableRef)

The name to use for an anonymous variable x when printing.

Example

julia> model = Model();
 
 julia> x = @variable(model);
 
 julia> anonymous_name(MIME("text/plain"), x)
-"_[1]"
source

backend

JuMP.backendFunction
backend(model::GenericModel)

Return the lower-level MathOptInterface model that sits underneath JuMP. This model depends on which operating mode JuMP is in (see mode).

  • If JuMP is in DIRECT mode (that is, the model was created using direct_model), the backend will be the optimizer passed to direct_model.
  • If JuMP is in MANUAL or AUTOMATIC mode, the backend is a MOI.Utilities.CachingOptimizer.

Use index to get the index of a variable or constraint in the backend model.

Warning

This function should only be used by advanced users looking to access low-level MathOptInterface or solver-specific functionality.

Notes

If JuMP is not in DIRECT mode, the type returned by backend may change between any JuMP releases. Therefore, only use the public API exposed by MathOptInterface, and do not access internal fields. If you require access to the innermost optimizer, see unsafe_backend. Alternatively, use direct_model to create a JuMP model in DIRECT mode.

See also: unsafe_backend.

Example

julia> import HiGHS
+"_[1]"
source

backend

JuMP.backendFunction
backend(model::GenericModel)

Return the lower-level MathOptInterface model that sits underneath JuMP. This model depends on which operating mode JuMP is in (see mode).

  • If JuMP is in DIRECT mode (that is, the model was created using direct_model), the backend will be the optimizer passed to direct_model.
  • If JuMP is in MANUAL or AUTOMATIC mode, the backend is a MOI.Utilities.CachingOptimizer.

Use index to get the index of a variable or constraint in the backend model.

Warning

This function should only be used by advanced users looking to access low-level MathOptInterface or solver-specific functionality.

Notes

If JuMP is not in DIRECT mode, the type returned by backend may change between any JuMP releases. Therefore, only use the public API exposed by MathOptInterface, and do not access internal fields. If you require access to the innermost optimizer, see unsafe_backend. Alternatively, use direct_model to create a JuMP model in DIRECT mode.

See also: unsafe_backend.

Example

julia> import HiGHS
 
 julia> model = direct_model(HiGHS.Optimizer());
 
@@ -402,7 +402,7 @@
 A HiGHS model with 1 columns and 0 rows.
 
 julia> index(x)
-MOI.VariableIndex(1)
source

barrier_iterations

JuMP.barrier_iterationsFunction
barrier_iterations(model::GenericModel)

If available, returns the cumulative number of barrier iterations during the most-recent optimization (the MOI.BarrierIterations attribute).

Throws a MOI.GetAttributeNotAllowed error if the attribute is not implemented by the solver.

Example

julia> import HiGHS
+MOI.VariableIndex(1)
source

barrier_iterations

JuMP.barrier_iterationsFunction
barrier_iterations(model::GenericModel)

If available, returns the cumulative number of barrier iterations during the most-recent optimization (the MOI.BarrierIterations attribute).

Throws a MOI.GetAttributeNotAllowed error if the attribute is not implemented by the solver.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
 
@@ -411,7 +411,7 @@
 julia> optimize!(model)
 
 julia> barrier_iterations(model)
-0
source

bridge_constraints

JuMP.bridge_constraintsFunction
bridge_constraints(model::GenericModel)

When in direct mode, return false.

When in manual or automatic mode, return a Bool indicating whether the optimizer is set and unsupported constraints are automatically bridged to equivalent supported constraints when an appropriate transformation is available.

Example

julia> import Ipopt
+0
source

bridge_constraints

JuMP.bridge_constraintsFunction
bridge_constraints(model::GenericModel)

When in direct mode, return false.

When in manual or automatic mode, return a Bool indicating whether the optimizer is set and unsupported constraints are automatically bridged to equivalent supported constraints when an appropriate transformation is available.

Example

julia> import Ipopt
 
 julia> model = Model(Ipopt.Optimizer);
 
@@ -421,14 +421,14 @@
 julia> model = Model(Ipopt.Optimizer; add_bridges = false);
 
 julia> bridge_constraints(model)
-false
source

build_constraint

JuMP.build_constraintFunction
build_constraint(error_fn::Function, func, set, args...; kwargs...)

This method should only be implemented by developers creating JuMP extensions. It should never be called by users of JuMP.

source

build_variable

JuMP.build_variableFunction
build_variable(
+false
source

build_constraint

JuMP.build_constraintFunction
build_constraint(error_fn::Function, func, set, args...; kwargs...)

This method should only be implemented by developers creating JuMP extensions. It should never be called by users of JuMP.

source

build_variable

JuMP.build_variableFunction
build_variable(
     error_fn::Function,
     info::VariableInfo,
     args...;
     kwargs...,
 )

Return a new AbstractVariable object.

This method should only be implemented by developers creating JuMP extensions. It should never be called by users of JuMP.

Arguments

  • error_fn: a function to call instead of error. error_fn annotates the error message with additional information for the user.
  • info: an instance of VariableInfo. This has a variety of fields relating to the variable such as info.lower_bound and info.binary.
  • args: optional additional positional arguments for extending the @variable macro.
  • kwargs: optional keyword arguments for extending the @variable macro.

See also: @variable

Warning

Extensions should define a method with ONE positional argument to dispatch the call to a different method. Creating an extension that relies on multiple positional arguments leads to MethodErrors if the user passes the arguments in the wrong order.

Example

@variable(model, x, Foo)

will call

build_variable(error_fn::Function, info::VariableInfo, ::Type{Foo})

Passing special-case positional arguments such as Bin, Int, and PSD is okay, along with keyword arguments:

@variable(model, x, Int, Foo(), mykwarg = true)
 # or
-@variable(model, x, Foo(), Int, mykwarg = true)

will call

build_variable(error_fn::Function, info::VariableInfo, ::Foo; mykwarg)

and info.integer will be true.

Note that the order of the positional arguments does not matter.

source

callback_node_status

JuMP.callback_node_statusFunction
callback_node_status(cb_data, model::GenericModel)

Return an MOI.CallbackNodeStatusCode enum, indicating if the current primal solution available from callback_value is integer feasible.

Example

julia> import GLPK
+@variable(model, x, Foo(), Int, mykwarg = true)

will call

build_variable(error_fn::Function, info::VariableInfo, ::Foo; mykwarg)

and info.integer will be true.

Note that the order of the positional arguments does not matter.

source

callback_node_status

JuMP.callback_node_statusFunction
callback_node_status(cb_data, model::GenericModel)

Return an MOI.CallbackNodeStatusCode enum, indicating if the current primal solution available from callback_value is integer feasible.

Example

julia> import GLPK
 
 julia> model = Model(GLPK.Optimizer);
 
@@ -449,7 +449,7 @@
 Status is: CALLBACK_NODE_STATUS_UNKNOWN
 Status is: CALLBACK_NODE_STATUS_UNKNOWN
 Status is: CALLBACK_NODE_STATUS_INTEGER
-Status is: CALLBACK_NODE_STATUS_INTEGER
source

callback_value

JuMP.callback_valueFunction
callback_value(cb_data, x::GenericVariableRef)
+Status is: CALLBACK_NODE_STATUS_INTEGER
source

callback_value

JuMP.callback_valueFunction
callback_value(cb_data, x::GenericVariableRef)
 callback_value(cb_data, x::Union{GenericAffExpr,GenericQuadExpr})

Return the primal solution of x inside a callback.

cb_data is the argument to the callback function, and the type is dependent on the solver.

Use callback_node_status to check whether a solution is available.

Example

julia> import GLPK
 
 julia> model = Model(GLPK.Optimizer);
@@ -471,7 +471,7 @@
 
 julia> optimize!(model)
 Solution is: 10.0
-Solution is: 10.0
source

check_belongs_to_model

JuMP.check_belongs_to_modelFunction
check_belongs_to_model(x::AbstractJuMPScalar, model::AbstractModel)
+Solution is: 10.0
source

check_belongs_to_model

JuMP.check_belongs_to_modelFunction
check_belongs_to_model(x::AbstractJuMPScalar, model::AbstractModel)
 check_belongs_to_model(x::AbstractConstraint, model::AbstractModel)

Throw VariableNotOwned if the owner_model of x is not model.

Example

julia> model = Model();
 
 julia> @variable(model, x);
@@ -483,7 +483,7 @@
 julia> check_belongs_to_model(x, model_2)
 ERROR: VariableNotOwned{VariableRef}(x): the variable x cannot be used in this model because
 it belongs to a different model.
-[...]
source

coefficient

JuMP.coefficientFunction
coefficient(v1::GenericVariableRef{T}, v2::GenericVariableRef{T}) where {T}

Return one(T) if v1 == v2 and zero(T) otherwise.

This is a fallback for other coefficient methods to simplify code in which the expression may be a single variable.

Example

julia> model = Model();
+[...]
source

coefficient

JuMP.coefficientFunction
coefficient(v1::GenericVariableRef{T}, v2::GenericVariableRef{T}) where {T}

Return one(T) if v1 == v2 and zero(T) otherwise.

This is a fallback for other coefficient methods to simplify code in which the expression may be a single variable.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2]);
 
@@ -491,14 +491,14 @@
 1.0
 
 julia> coefficient(x[1], x[2])
-0.0
source
coefficient(a::GenericAffExpr{C,V}, v::V) where {C,V}

Return the coefficient associated with variable v in the affine expression a.

Example

julia> model = Model();
+0.0
source
coefficient(a::GenericAffExpr{C,V}, v::V) where {C,V}

Return the coefficient associated with variable v in the affine expression a.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
 julia> expr = 2.0 * x + 1.0;
 
 julia> coefficient(expr, x)
-2.0
source
coefficient(a::GenericQuadExpr{C,V}, v1::V, v2::V) where {C,V}

Return the coefficient associated with the term v1 * v2 in the quadratic expression a.

Note that coefficient(a, v1, v2) is the same as coefficient(a, v2, v1).

Example

julia> model = Model();
+2.0
source
coefficient(a::GenericQuadExpr{C,V}, v1::V, v2::V) where {C,V}

Return the coefficient associated with the term v1 * v2 in the quadratic expression a.

Note that coefficient(a, v1, v2) is the same as coefficient(a, v2, v1).

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2]);
 
@@ -511,14 +511,14 @@
 2.0
 
 julia> coefficient(expr, x[1], x[1])
-0.0
source
coefficient(a::GenericQuadExpr{C,V}, v::V) where {C,V}

Return the coefficient associated with variable v in the affine component of a.

Example

julia> model = Model();
+0.0
source
coefficient(a::GenericQuadExpr{C,V}, v::V) where {C,V}

Return the coefficient associated with variable v in the affine component of a.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
 julia> expr = 2.0 * x^2 + 3.0 * x;
 
 julia> coefficient(expr, x)
-3.0
source

compute_conflict!

JuMP.compute_conflict!Function
compute_conflict!(model::GenericModel)

Compute a conflict if the model is infeasible.

The conflict is also called the Irreducible Infeasible Subsystem (IIS).

If an optimizer has not been set yet (see set_optimizer), a NoOptimizer error is thrown.

The status of the conflict can be checked with the MOI.ConflictStatus model attribute. Then, the status for each constraint can be queried with the MOI.ConstraintConflictStatus attribute.

See also: copy_conflict

Example

julia> using JuMP
+3.0
source

compute_conflict!

JuMP.compute_conflict!Function
compute_conflict!(model::GenericModel)

Compute a conflict if the model is infeasible.

The conflict is also called the Irreducible Infeasible Subsystem (IIS).

If an optimizer has not been set yet (see set_optimizer), a NoOptimizer error is thrown.

The status of the conflict can be checked with the MOI.ConflictStatus model attribute. Then, the status for each constraint can be queried with the MOI.ConstraintConflictStatus attribute.

See also: copy_conflict

Example

julia> using JuMP
 
 julia> model = Model(Gurobi.Optimizer);
 
@@ -535,21 +535,21 @@
 julia> compute_conflict!(model)
 
 julia> get_attribute(model, MOI.ConflictStatus())
-CONFLICT_FOUND::ConflictStatusCode = 3
source

constant

JuMP.constantFunction
constant(aff::GenericAffExpr{C,V})::C

Return the constant of the affine expression.

Example

julia> model = Model();
+CONFLICT_FOUND::ConflictStatusCode = 3
source

constant

JuMP.constantFunction
constant(aff::GenericAffExpr{C,V})::C

Return the constant of the affine expression.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
 julia> aff = 2.0 * x + 3.0;
 
 julia> constant(aff)
-3.0
source
constant(quad::GenericQuadExpr{C,V})::C

Return the constant of the quadratic expression.

Example

julia> model = Model();
+3.0
source
constant(quad::GenericQuadExpr{C,V})::C

Return the constant of the quadratic expression.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
 julia> quad = 2.0 * x^2 + 3.0;
 
 julia> constant(quad)
-3.0
source

constraint_by_name

JuMP.constraint_by_nameFunction
constraint_by_name(model::AbstractModel, name::String, [F, S])::Union{ConstraintRef,Nothing}

Return the reference of the constraint with name attribute name or Nothing if no constraint has this name attribute.

Throws an error if several constraints have name as their name attribute.

If F and S are provided, this method additionally throws an error if the constraint is not an F-in-S constraint, where F is either the JuMP or MOI type of the function and S is the MOI type of the set.

Providing F and S is recommended if you know the type of the function and set, since the return type can then be inferred; for the method above (that is, without F and S), the exact return type of the constraint index cannot be inferred.

Example

julia> model = Model();
+3.0
source

constraint_by_name

JuMP.constraint_by_nameFunction
constraint_by_name(model::AbstractModel, name::String, [F, S])::Union{ConstraintRef,Nothing}

Return the reference of the constraint with name attribute name or Nothing if no constraint has this name attribute.

Throws an error if several constraints have name as their name attribute.

If F and S are provided, this method additionally throws an error if the constraint is not an F-in-S constraint, where F is either the JuMP or MOI type of the function and S is the MOI type of the set.

Providing F and S is recommended if you know the type of the function and set, since the return type can then be inferred; for the method above (that is, without F and S), the exact return type of the constraint index cannot be inferred.

Example

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -565,7 +565,7 @@
 julia> constraint_by_name(model, "con", AffExpr, MOI.EqualTo{Float64})
 
 julia> constraint_by_name(model, "con", QuadExpr, MOI.EqualTo{Float64})
-con : x² = 1
source

constraint_object

JuMP.constraint_objectFunction
constraint_object(con_ref::ConstraintRef)

Return the underlying constraint data for the constraint referenced by con_ref.

Example

A scalar constraint:

julia> model = Model();
+con : x² = 1
source

constraint_object

JuMP.constraint_objectFunction
constraint_object(con_ref::ConstraintRef)

Return the underlying constraint data for the constraint referenced by con_ref.

Example

A scalar constraint:

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -602,7 +602,7 @@
  x[3]
 
 julia> object.set
-MathOptInterface.SecondOrderCone(3)
source

constraint_ref_with_index

JuMP.constraint_ref_with_indexFunction
constraint_ref_with_index(model::AbstractModel, index::MOI.ConstraintIndex)

Return a ConstraintRef of model corresponding to index.

This function is a helper function used internally by JuMP and some JuMP extensions. It should not need to be called in user-code.

source

constraint_string

JuMP.constraint_stringFunction
constraint_string(
+MathOptInterface.SecondOrderCone(3)
source

constraint_ref_with_index

JuMP.constraint_ref_with_indexFunction
constraint_ref_with_index(model::AbstractModel, index::MOI.ConstraintIndex)

Return a ConstraintRef of model corresponding to index.

This function is a helper function used internally by JuMP and some JuMP extensions. It should not need to be called in user-code.

source
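
For illustration, a minimal sketch of the round trip between a ConstraintRef and its MOI.ConstraintIndex; the printed form of the returned reference is illustrative:

julia> model = Model();

julia> @variable(model, x);

julia> @constraint(model, c, 2 * x <= 1);

julia> JuMP.constraint_ref_with_index(model, index(c))
c : 2 x ≤ 1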

constraint_string

JuMP.constraint_stringFunction
constraint_string(
     mode::MIME,
     ref::ConstraintRef;
     in_math_mode::Bool = false,
@@ -613,7 +613,7 @@
 julia> @constraint(model, c, 2 * x <= 1);
 
 julia> constraint_string(MIME("text/plain"), c)
-"c : 2 x ≤ 1"
source

constraints_string

JuMP.constraints_stringFunction
constraints_string(mode, model::AbstractModel)::Vector{String}

Return a list of Strings describing each constraint of the model.

Example

julia> model = Model();
+"c : 2 x ≤ 1"
source

constraints_string

JuMP.constraints_stringFunction
constraints_string(mode, model::AbstractModel)::Vector{String}

Return a list of Strings describing each constraint of the model.

Example

julia> model = Model();
 
 julia> @variable(model, x >= 0);
 
@@ -622,7 +622,7 @@
 julia> constraints_string(MIME("text/plain"), model)
 2-element Vector{String}:
  "c : 2 x ≤ 1"
- "x ≥ 0"
source

copy_conflict

JuMP.copy_conflictFunction
copy_conflict(model::GenericModel)

Return a copy of the current conflict for the model model and a GenericReferenceMap that can be used to obtain the variable and constraint reference of the new model corresponding to a given model's reference.

This is a convenience function that provides a filtering function for copy_model.

Note

Model copy is not supported in DIRECT mode, that is, when a model is constructed using the direct_model constructor instead of the Model constructor. Moreover, regardless of whether an optimizer was provided at model construction, the new model will have no optimizer; an optimizer will have to be provided to the new model in the optimize! call.

Example

In the following example, a model model is constructed with a variable x and two constraints c1 and c2. This model has no solution, as the two constraints are mutually exclusive. The solver is asked to compute a conflict with compute_conflict!. The parts of model participating in the conflict are then copied into a model iis_model.

julia> using JuMP
+ "x ≥ 0"
source

copy_conflict

JuMP.copy_conflictFunction
copy_conflict(model::GenericModel)

Return a copy of the current conflict for the model model and a GenericReferenceMap that can be used to obtain the variable and constraint reference of the new model corresponding to a given model's reference.

This is a convenience function that provides a filtering function for copy_model.

Note

Model copy is not supported in DIRECT mode, that is, when a model is constructed using the direct_model constructor instead of the Model constructor. Moreover, regardless of whether an optimizer was provided at model construction, the new model will have no optimizer; an optimizer will have to be provided to the new model in the optimize! call.

Example

In the following example, a model model is constructed with a variable x and two constraints c1 and c2. This model has no solution, as the two constraints are mutually exclusive. The solver is asked to compute a conflict with compute_conflict!. The parts of model participating in the conflict are then copied into a model iis_model.

julia> using JuMP
 
 julia> import Gurobi
 
@@ -650,7 +650,7 @@
 Feasibility
 Subject to
  c1 : x ≥ 2
- c2 : x ≤ 1
source

copy_extension_data

JuMP.copy_extension_dataFunction
copy_extension_data(data, new_model::AbstractModel, model::AbstractModel)

Return a copy of the extension data data of the model model, to be stored as the extension data of the new model new_model.

A method should be added for any JuMP extension storing data in the ext field.

This method should only be implemented by developers creating JuMP extensions. It should never be called by users of JuMP.

Warning

Do not engage in type piracy by implementing this method for types of data that you did not define! JuMP extensions should store types that they define in model.ext, rather than regular Julia types.

source

copy_model

JuMP.copy_modelFunction
copy_model(model::GenericModel; filter_constraints::Union{Nothing, Function}=nothing)

Return a copy of the model model and a GenericReferenceMap that can be used to obtain the variable and constraint reference of the new model corresponding to a given model's reference. A Base.copy(::AbstractModel) method has also been implemented; it is similar to copy_model but does not return the reference map.

If the filter_constraints argument is given, only the constraints for which this function returns true will be copied. This function is given a constraint reference as argument.

Note

Model copy is not supported in DIRECT mode, that is, when a model is constructed using the direct_model constructor instead of the Model constructor. Moreover, regardless of whether an optimizer was provided at model construction, the new model will have no optimizer; an optimizer will have to be provided to the new model in the optimize! call.

Example

In the following example, a model model is constructed with a variable x and a constraint cref. It is then copied into a model new_model with the new references assigned to x_new and cref_new.

julia> model = Model();
+ c2 : x ≤ 1
source

copy_extension_data

JuMP.copy_extension_dataFunction
copy_extension_data(data, new_model::AbstractModel, model::AbstractModel)

Return a copy of the extension data data of the model model, to be stored as the extension data of the new model new_model.

A method should be added for any JuMP extension storing data in the ext field.

This method should only be implemented by developers creating JuMP extensions. It should never be called by users of JuMP.

Warning

Do not engage in type piracy by implementing this method for types of data that you did not define! JuMP extensions should store types that they define in model.ext, rather than regular Julia types.

source
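
As an illustrative sketch only, a hypothetical extension that stores a MyExtData struct (made up for this example) in model.ext might implement the method as follows:

struct MyExtData
    num_widgets::Int
end

function JuMP.copy_extension_data(
    data::MyExtData,
    new_model::JuMP.AbstractModel,
    model::JuMP.AbstractModel,
)
    # Return the value to be stored as the extension data of `new_model`;
    # a field-wise copy suffices for this simple struct.
    return MyExtData(data.num_widgets)
end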

copy_model

JuMP.copy_modelFunction
copy_model(model::GenericModel; filter_constraints::Union{Nothing, Function}=nothing)

Return a copy of the model model and a GenericReferenceMap that can be used to obtain the variable and constraint reference of the new model corresponding to a given model's reference. A Base.copy(::AbstractModel) method has also been implemented; it is similar to copy_model but does not return the reference map.

If the filter_constraints argument is given, only the constraints for which this function returns true will be copied. This function is given a constraint reference as argument.

Note

Model copy is not supported in DIRECT mode, that is, when a model is constructed using the direct_model constructor instead of the Model constructor. Moreover, regardless of whether an optimizer was provided at model construction, the new model will have no optimizer; an optimizer will have to be provided to the new model in the optimize! call.

Example

In the following example, a model model is constructed with a variable x and a constraint cref. It is then copied into a model new_model with the new references assigned to x_new and cref_new.

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -664,7 +664,7 @@
 x
 
 julia> cref_new = reference_map[cref]
-cref : x = 2
source

delete

JuMP.deleteFunction
delete(model::GenericModel, con_ref::ConstraintRef)

Delete the constraint associated with con_ref from the model model.

Note that delete does not unregister the name from the model, so adding a new constraint of the same name will throw an error. Use unregister to unregister the name after deletion.

Example

julia> model = Model();
+cref : x = 2
source

delete

JuMP.deleteFunction
delete(model::GenericModel, con_ref::ConstraintRef)

Delete the constraint associated with con_ref from the model model.

Note that delete does not unregister the name from the model, so adding a new constraint of the same name will throw an error. Use unregister to unregister the name after deletion.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -682,7 +682,7 @@
 julia> model[:c]
 ERROR: KeyError: key :c not found
 Stacktrace:
-[...]
source
delete(model::GenericModel, con_refs::Vector{<:ConstraintRef})

Delete the constraints associated with con_refs from the model model.

Solvers may implement specialized methods for deleting multiple constraints of the same concrete type. These methods may be more efficient than repeatedly calling the single constraint delete method.

See also: unregister

Example

julia> model = Model();
+[...]
source
delete(model::GenericModel, con_refs::Vector{<:ConstraintRef})

Delete the constraints associated with con_refs from the model model.

Solvers may implement specialized methods for deleting multiple constraints of the same concrete type. These methods may be more efficient than repeatedly calling the single constraint delete method.

See also: unregister

Example

julia> model = Model();
 
 julia> @variable(model, x[1:3]);
 
@@ -703,7 +703,7 @@
 julia> model[:c]
 ERROR: KeyError: key :c not found
 Stacktrace:
-[...]
source
delete(model::GenericModel, variable_ref::GenericVariableRef)

Delete the variable associated with variable_ref from the model model.

Note that delete does not unregister the name from the model, so adding a new variable of the same name will throw an error. Use unregister to unregister the name after deletion.

Example

julia> model = Model();
+[...]
source
delete(model::GenericModel, variable_ref::GenericVariableRef)

Delete the variable associated with variable_ref from the model model.

Note that delete does not unregister the name from the model, so adding a new variable of the same name will throw an error. Use unregister to unregister the name after deletion.

Example

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -719,7 +719,7 @@
 julia> model[:x]
 ERROR: KeyError: key :x not found
 Stacktrace:
-[...]
source
delete(model::GenericModel, variable_refs::Vector{<:GenericVariableRef})

Delete the variables associated with variable_refs from the model model. Solvers may implement methods for deleting multiple variables that are more efficient than repeatedly calling the single variable delete method.

See also: unregister

Example

julia> model = Model();
+[...]
source
delete(model::GenericModel, variable_refs::Vector{<:GenericVariableRef})

Delete the variables associated with variable_refs from the model model. Solvers may implement methods for deleting multiple variables that are more efficient than repeatedly calling the single variable delete method.

See also: unregister

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2]);
 
@@ -734,7 +734,7 @@
 julia> model[:x]
 ERROR: KeyError: key :x not found
 Stacktrace:
-[...]
source

delete_lower_bound

JuMP.delete_lower_boundFunction
delete_lower_bound(v::GenericVariableRef)

Delete the lower bound constraint of a variable.

See also LowerBoundRef, has_lower_bound, lower_bound, set_lower_bound.

Example

julia> model = Model();
+[...]
source

delete_lower_bound

JuMP.delete_lower_boundFunction
delete_lower_bound(v::GenericVariableRef)

Delete the lower bound constraint of a variable.

See also LowerBoundRef, has_lower_bound, lower_bound, set_lower_bound.

Example

julia> model = Model();
 
 julia> @variable(model, x >= 1.0);
 
@@ -744,7 +744,7 @@
 julia> delete_lower_bound(x)
 
 julia> has_lower_bound(x)
-false
source

delete_upper_bound

JuMP.delete_upper_boundFunction
delete_upper_bound(v::GenericVariableRef)

Delete the upper bound constraint of a variable.

Errors if one does not exist.

See also UpperBoundRef, has_upper_bound, upper_bound, set_upper_bound.

Example

julia> model = Model();
+false
source

delete_upper_bound

JuMP.delete_upper_boundFunction
delete_upper_bound(v::GenericVariableRef)

Delete the upper bound constraint of a variable.

Errors if one does not exist.

See also UpperBoundRef, has_upper_bound, upper_bound, set_upper_bound.

Example

julia> model = Model();
 
 julia> @variable(model, x <= 1.0);
 
@@ -754,10 +754,10 @@
 julia> delete_upper_bound(x)
 
 julia> has_upper_bound(x)
-false
source

direct_generic_model

JuMP.direct_generic_modelFunction
direct_generic_model(
+false
source

direct_generic_model

JuMP.direct_generic_modelFunction
direct_generic_model(
     value_type::Type{T},
     backend::MOI.ModelLike;
-) where {T<:Real}

Return a new JuMP model using backend to store the model and solve it.

As opposed to the Model constructor, no cache of the model is stored outside of backend and no bridges are automatically applied to backend.

Notes

The absence of a cache reduces the memory footprint, but it is important to bear in mind the following implications of creating models using this direct mode:

  • When backend does not support an operation, such as modifying constraints or adding variables/constraints after solving, an error is thrown. For models created using the Model constructor, such situations can be dealt with by storing the modifications in a cache and loading them into the optimizer when optimize! is called.
  • No constraint bridging is supported by default.
  • The optimizer used cannot be changed after the model is constructed.
  • The model created cannot be copied.
source
direct_generic_model(::Type{T}, factory::MOI.OptimizerWithAttributes)

Create a direct_generic_model using factory, a MOI.OptimizerWithAttributes object created by optimizer_with_attributes.

Example

julia> import HiGHS
+) where {T<:Real}

Return a new JuMP model using backend to store the model and solve it.

As opposed to the Model constructor, no cache of the model is stored outside of backend and no bridges are automatically applied to backend.

Notes

The absence of a cache reduces the memory footprint, but it is important to bear in mind the following implications of creating models using this direct mode:

  • When backend does not support an operation, such as modifying constraints or adding variables/constraints after solving, an error is thrown. For models created using the Model constructor, such situations can be dealt with by storing the modifications in a cache and loading them into the optimizer when optimize! is called.
  • No constraint bridging is supported by default.
  • The optimizer used cannot be changed after the model is constructed.
  • The model created cannot be copied.
source
direct_generic_model(::Type{T}, factory::MOI.OptimizerWithAttributes)

Create a direct_generic_model using factory, a MOI.OptimizerWithAttributes object created by optimizer_with_attributes.

Example

julia> import HiGHS
 
 julia> optimizer = optimizer_with_attributes(
            HiGHS.Optimizer,
@@ -785,7 +785,7 @@
 
 julia> set_attribute(model, "presolve", "off")
 
-julia> set_attribute(model, MOI.Silent(), true)
source

direct_model

JuMP.direct_modelFunction
direct_model(backend::MOI.ModelLike)

Return a new JuMP model using backend to store the model and solve it.

As opposed to the Model constructor, no cache of the model is stored outside of backend and no bridges are automatically applied to backend.

Notes

The absence of a cache reduces the memory footprint, but it is important to bear in mind the following implications of creating models using this direct mode:

  • When backend does not support an operation, such as modifying constraints or adding variables/constraints after solving, an error is thrown. For models created using the Model constructor, such situations can be dealt with by storing the modifications in a cache and loading them into the optimizer when optimize! is called.
  • No constraint bridging is supported by default.
  • The optimizer used cannot be changed after the model is constructed.
  • The model created cannot be copied.
source
direct_model(factory::MOI.OptimizerWithAttributes)

Create a direct_model using factory, a MOI.OptimizerWithAttributes object created by optimizer_with_attributes.

Example

julia> import HiGHS
+julia> set_attribute(model, MOI.Silent(), true)
source

direct_model

JuMP.direct_modelFunction
direct_model(backend::MOI.ModelLike)

Return a new JuMP model using backend to store the model and solve it.

As opposed to the Model constructor, no cache of the model is stored outside of backend and no bridges are automatically applied to backend.

Notes

The absence of a cache reduces the memory footprint, but it is important to bear in mind the following implications of creating models using this direct mode:

  • When backend does not support an operation, such as modifying constraints or adding variables/constraints after solving, an error is thrown. For models created using the Model constructor, such situations can be dealt with by storing the modifications in a cache and loading them into the optimizer when optimize! is called.
  • No constraint bridging is supported by default.
  • The optimizer used cannot be changed after the model is constructed.
  • The model created cannot be copied.
source
direct_model(factory::MOI.OptimizerWithAttributes)

Create a direct_model using factory, a MOI.OptimizerWithAttributes object created by optimizer_with_attributes.

Example

julia> import HiGHS
 
 julia> optimizer = optimizer_with_attributes(
            HiGHS.Optimizer,
@@ -813,7 +813,7 @@
 
 julia> set_attribute(model, "presolve", "off")
 
-julia> set_attribute(model, MOI.Silent(), true)
source

drop_zeros!

JuMP.drop_zeros!Function
drop_zeros!(expr::GenericAffExpr)

Remove terms in the affine expression with 0 coefficients.

Example

julia> model = Model();
+julia> set_attribute(model, MOI.Silent(), true)
source

drop_zeros!

JuMP.drop_zeros!Function
drop_zeros!(expr::GenericAffExpr)

Remove terms in the affine expression with 0 coefficients.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2]);
 
@@ -825,7 +825,7 @@
 julia> drop_zeros!(expr)
 
 julia> expr
-x[2]
source
drop_zeros!(expr::GenericQuadExpr)

Remove terms in the quadratic expression with 0 coefficients.

Example

julia> model = Model();
+x[2]
source
drop_zeros!(expr::GenericQuadExpr)

Remove terms in the quadratic expression with 0 coefficients.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2]);
 
@@ -837,7 +837,7 @@
 julia> drop_zeros!(expr)
 
 julia> expr
-x[2]²
source

dual

JuMP.dualFunction
dual(con_ref::ConstraintRef; result::Int = 1)

Return the dual value of constraint con_ref associated with result index result of the most-recent solution returned by the solver.

Use has_duals to check if a result exists before asking for values.

See also: result_count, shadow_price.

Example

julia> import HiGHS
+x[2]²
source

dual

JuMP.dualFunction
dual(con_ref::ConstraintRef; result::Int = 1)

Return the dual value of constraint con_ref associated with result index result of the most-recent solution returned by the solver.

Use has_duals to check if a result exists before asking for values.

See also: result_count, shadow_price.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
 
@@ -856,7 +856,7 @@
 true
 
 julia> dual(c)
--2.0
source

dual_objective_value

JuMP.dual_objective_valueFunction
dual_objective_value(model::GenericModel; result::Int = 1)

Return the value of the objective of the dual problem associated with result index result of the most-recent solution returned by the solver.

Throws MOI.UnsupportedAttribute{MOI.DualObjectiveValue} if the solver does not support this attribute.

This function is equivalent to querying the MOI.DualObjectiveValue attribute.

See also: result_count.

Example

julia> import HiGHS
+-2.0
source

dual_objective_value

JuMP.dual_objective_valueFunction
dual_objective_value(model::GenericModel; result::Int = 1)

Return the value of the objective of the dual problem associated with result index result of the most-recent solution returned by the solver.

Throws MOI.UnsupportedAttribute{MOI.DualObjectiveValue} if the solver does not support this attribute.

This function is equivalent to querying the MOI.DualObjectiveValue attribute.

See also: result_count.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
 
@@ -874,7 +874,7 @@
 julia> dual_objective_value(model; result = 2)
 ERROR: Result index of attribute MathOptInterface.DualObjectiveValue(2) out of bounds. There are currently 1 solution(s) in the model.
 Stacktrace:
-[...]
source

dual_shape

JuMP.dual_shapeFunction
dual_shape(shape::AbstractShape)::AbstractShape

Returns the shape of the dual space of the space of objects of shape shape. By default, the dual_shape of a shape is itself. See the examples section below for an example for which this is not the case.

Example

Consider polynomial constraints for which the dual is moment constraints and moment constraints for which the dual is polynomial constraints. Shapes for polynomials can be defined as follows:

struct Polynomial
+[...]
source

dual_shape

JuMP.dual_shapeFunction
dual_shape(shape::AbstractShape)::AbstractShape

Returns the shape of the dual space of the space of objects of shape shape. By default, the dual_shape of a shape is itself. See the examples section below for an example for which this is not the case.

Example

Consider polynomial constraints for which the dual is moment constraints and moment constraints for which the dual is polynomial constraints. Shapes for polynomials can be defined as follows:

struct Polynomial
     coefficients::Vector{Float64}
     monomials::Vector{Monomial}
 end
@@ -889,7 +889,7 @@
     monomials::Vector{Monomial}
 end
 JuMP.reshape_vector(x::Vector, shape::MomentsShape) = Moments(x, shape.monomials)

Then dual_shape allows the definition of the shape of the dual of polynomial and moment constraints:

dual_shape(shape::PolynomialShape) = MomentsShape(shape.monomials)
-dual_shape(shape::MomentsShape) = PolynomialShape(shape.monomials)
source

dual_start_value

JuMP.dual_start_valueFunction
dual_start_value(con_ref::ConstraintRef)

Return the dual start value (MOI attribute ConstraintDualStart) of the constraint con_ref.

If no dual start value has been set, dual_start_value will return nothing.

See also set_dual_start_value.

Example

julia> model = Model();
+dual_shape(shape::MomentsShape) = PolynomialShape(shape.monomials)
source

dual_start_value

JuMP.dual_start_valueFunction
dual_start_value(con_ref::ConstraintRef)

Return the dual start value (MOI attribute ConstraintDualStart) of the constraint con_ref.

If no dual start value has been set, dual_start_value will return nothing.

See also set_dual_start_value.

Example

julia> model = Model();
 
 julia> @variable(model, x, start = 2.0);
 
@@ -904,19 +904,19 @@
 
 julia> set_dual_start_value(c, nothing)
 
-julia> dual_start_value(c)
source

dual_status

JuMP.dual_statusFunction
dual_status(model::GenericModel; result::Int = 1)

Return a MOI.ResultStatusCode describing the status of the most recent dual solution of the solver (that is, the MOI.DualStatus attribute) associated with the result index result.

See also: result_count.

Example

julia> import Ipopt
+julia> dual_start_value(c)
source

dual_status

JuMP.dual_statusFunction
dual_status(model::GenericModel; result::Int = 1)

Return a MOI.ResultStatusCode describing the status of the most recent dual solution of the solver (that is, the MOI.DualStatus attribute) associated with the result index result.

See also: result_count.

Example

julia> import Ipopt
 
 julia> model = Model(Ipopt.Optimizer);
 
 julia> dual_status(model; result = 2)
-NO_SOLUTION::ResultStatusCode = 0
source

error_if_direct_mode

JuMP.error_if_direct_modeFunction
error_if_direct_mode(model::GenericModel, func::Symbol)

Errors if model is in direct mode during a call from the function named func.

Used internally within JuMP, or by JuMP extensions that do not want to support models in direct mode.

Example

julia> import HiGHS
+NO_SOLUTION::ResultStatusCode = 0
source

error_if_direct_mode

JuMP.error_if_direct_modeFunction
error_if_direct_mode(model::GenericModel, func::Symbol)

Errors if model is in direct mode during a call from the function named func.

Used internally within JuMP, or by JuMP extensions that do not want to support models in direct mode.

Example

julia> import HiGHS
 
 julia> model = direct_model(HiGHS.Optimizer());
 
 julia> error_if_direct_mode(model, :foo)
 ERROR: The `foo` function is not supported in DIRECT mode.
 Stacktrace:
-[...]
source

fix

JuMP.fixFunction
fix(v::GenericVariableRef, value::Number; force::Bool = false)

Fix a variable to a value. Update the fixing constraint if one exists, otherwise create a new one.

If the variable already has variable bounds and force=false, calling fix will throw an error. If force=true, existing variable bounds will be deleted, and the fixing constraint will be added. Note a variable will have no bounds after a call to unfix.

See also FixRef, is_fixed, fix_value, unfix.

Example

julia> model = Model();
+[...]
source

fix

JuMP.fixFunction
fix(v::GenericVariableRef, value::Number; force::Bool = false)

Fix a variable to a value. Update the fixing constraint if one exists, otherwise create a new one.

If the variable already has variable bounds and force=false, calling fix will throw an error. If force=true, existing variable bounds will be deleted, and the fixing constraint will be added. Note a variable will have no bounds after a call to unfix.

See also FixRef, is_fixed, fix_value, unfix.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -936,7 +936,7 @@
 julia> fix(x, 1.0; force = true)
 
 julia> is_fixed(x)
-true
source

fix_discrete_variables

JuMP.fix_discrete_variablesFunction
fix_discrete_variables([var_value::Function = value,] model::GenericModel)

Modifies model to convert all binary and integer variables to continuous variables with fixed bounds of var_value(x).

Return

Returns a function that can be called without any arguments to restore the original model. The behavior of this function is undefined if additional changes are made to the affected variables in the meantime.

Notes

  • An error is thrown if semi-continuous or semi-integer constraints are present (support may be added for these in the future).
  • All other constraints are ignored (left in place). This includes discrete constraints like SOS and indicator constraints.

Example

julia> model = Model();
+true
source

fix_discrete_variables

JuMP.fix_discrete_variablesFunction
fix_discrete_variables([var_value::Function = value,] model::GenericModel)

Modifies model to convert all binary and integer variables to continuous variables with fixed bounds of var_value(x).

Return

Returns a function that can be called without any arguments to restore the original model. The behavior of this function is undefined if additional changes are made to the affected variables in the meantime.

Notes

  • An error is thrown if semi-continuous or semi-integer constraints are present (support may be added for these in the future).
  • All other constraints are ignored (left in place). This includes discrete constraints like SOS and indicator constraints.

Example

julia> model = Model();
 
 julia> @variable(model, x, Bin, start = 1);
 
@@ -960,12 +960,12 @@
  y ≥ 1
  y ≤ 10
  y integer
- x binary
source

fix_value

JuMP.fix_valueFunction
fix_value(v::GenericVariableRef)

Return the value to which a variable is fixed.

Error if one does not exist.

See also FixRef, is_fixed, fix, unfix.

Example

julia> model = Model();
+ x binary
source

fix_value

JuMP.fix_valueFunction
fix_value(v::GenericVariableRef)

Return the value to which a variable is fixed.

Error if one does not exist.

See also FixRef, is_fixed, fix, unfix.

Example

julia> model = Model();
 
 julia> @variable(model, x == 1);
 
 julia> fix_value(x)
-1.0
source

flatten!

JuMP.flatten!Function
flatten!(expr::GenericNonlinearExpr)

Flatten a nonlinear expression in-place by lifting nested + and * nodes into a single n-ary operation.

Motivation

Nonlinear expressions created using operator overloading can be deeply nested and unbalanced. For example, prod(x for i in 1:4) creates *(x, *(x, *(x, x))) instead of the more preferable *(x, x, x, x).

Example

julia> model = Model();
+1.0
source

flatten!

JuMP.flatten!Function
flatten!(expr::GenericNonlinearExpr)

Flatten a nonlinear expression in-place by lifting nested + and * nodes into a single n-ary operation.

Motivation

Nonlinear expressions created using operator overloading can be deeply nested and unbalanced. For example, prod(x for i in 1:4) creates *(x, *(x, *(x, x))) instead of the more preferable *(x, x, x, x).

Example

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -977,7 +977,7 @@
 (x²) * x * x
 
 julia> flatten!(sin(prod(x for i in 1:4)))
-sin((x²) * x * x)
source

function_string

JuMP.function_stringFunction
function_string(
+sin((x²) * x * x)
source

function_string

JuMP.function_stringFunction
function_string(
     mode::MIME,
     func::Union{JuMP.AbstractJuMPScalar,Vector{<:JuMP.AbstractJuMPScalar}},
 )

Return a String representing the function func using print mode mode.

Example

julia> model = Model();
@@ -985,7 +985,7 @@
 julia> @variable(model, x);
 
 julia> function_string(MIME("text/plain"), 2 * x + 1)
-"2 x + 1"
source

get_attribute

JuMP.get_attributeFunction
get_attribute(model::GenericModel, attr::MOI.AbstractModelAttribute)
+"2 x + 1"
source

get_attribute

JuMP.get_attributeFunction
get_attribute(model::GenericModel, attr::MOI.AbstractModelAttribute)
 get_attribute(x::GenericVariableRef, attr::MOI.AbstractVariableAttribute)
 get_attribute(cr::ConstraintRef, attr::MOI.AbstractConstraintAttribute)

Get the value of a solver-specific attribute attr.

This is equivalent to calling MOI.get with the associated MOI model and, for variables and constraints, with the associated MOI.VariableIndex or MOI.ConstraintIndex.

Example

julia> model = Model();
 
@@ -1002,7 +1002,7 @@
 "x"
 
 julia> get_attribute(c, MOI.ConstraintName())
-"c"
source
get_attribute(
+"c"
source
get_attribute(
     model::Union{GenericModel,MOI.OptimizerWithAttributes},
     attr::Union{AbstractString,MOI.AbstractOptimizerAttribute},
 )

Get the value of a solver-specific attribute attr.

This is equivalent to calling MOI.get with the associated MOI model.

If attr is an AbstractString, it is converted to MOI.RawOptimizerAttribute.

Example

julia> import HiGHS
@@ -1021,7 +1021,7 @@
 true
 
 julia> get_attribute(opt, MOI.RawOptimizerAttribute("output_flag"))
-true
source

has_duals

JuMP.has_dualsFunction
has_duals(model::GenericModel; result::Int = 1)

Return true if the solver has a dual solution in result index result available to query, otherwise return false.

See also dual, shadow_price, and result_count.

Example

julia> import HiGHS
+true
source

has_duals

JuMP.has_dualsFunction
has_duals(model::GenericModel; result::Int = 1)

Return true if the solver has a dual solution in result index result available to query, otherwise return false.

See also dual, shadow_price, and result_count.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
 
@@ -1040,12 +1040,12 @@
 julia> optimize!(model)
 
 julia> has_duals(model)
-true
source

has_lower_bound

JuMP.has_lower_boundFunction
has_lower_bound(v::GenericVariableRef)

Return true if v has a lower bound. If true, the lower bound can be queried with lower_bound.

See also LowerBoundRef, lower_bound, set_lower_bound, delete_lower_bound.

Example

julia> model = Model();
+true
source

has_lower_bound

JuMP.has_lower_boundFunction
has_lower_bound(v::GenericVariableRef)

Return true if v has a lower bound. If true, the lower bound can be queried with lower_bound.

See also LowerBoundRef, lower_bound, set_lower_bound, delete_lower_bound.

Example

julia> model = Model();
 
 julia> @variable(model, x >= 1.0);
 
 julia> has_lower_bound(x)
-true
source

has_start_value

JuMP.has_start_valueFunction
has_start_value(variable::AbstractVariableRef)

Return true if the variable has a start value set, otherwise return false.

See also: start_value, set_start_value.

Example

julia> model = Model();
+true
source

has_start_value

JuMP.has_start_valueFunction
has_start_value(variable::AbstractVariableRef)

Return true if the variable has a start value set, otherwise return false.

See also: start_value, set_start_value.

Example

julia> model = Model();
 
 julia> @variable(model, x, start = 1.5);
 
@@ -1066,12 +1066,12 @@
 true
 
 julia> start_value(y)
-2.0
source

has_upper_bound

JuMP.has_upper_boundFunction
has_upper_bound(v::GenericVariableRef)

Return true if v has an upper bound. If true, the upper bound can be queried with upper_bound.

See also UpperBoundRef, upper_bound, set_upper_bound, delete_upper_bound.

Example

julia> model = Model();
+2.0
source

has_upper_bound

JuMP.has_upper_boundFunction
has_upper_bound(v::GenericVariableRef)

Return true if v has an upper bound. If true, the upper bound can be queried with upper_bound.

See also UpperBoundRef, upper_bound, set_upper_bound, delete_upper_bound.

Example

julia> model = Model();
 
 julia> @variable(model, x <= 1.0);
 
 julia> has_upper_bound(x)
-true
source

has_values

JuMP.has_valuesFunction
has_values(model::GenericModel; result::Int = 1)

Return true if the solver has a primal solution in result index result available to query, otherwise return false.

See also value and result_count.

Example

julia> import HiGHS
+true
source

has_values

JuMP.has_valuesFunction
has_values(model::GenericModel; result::Int = 1)

Return true if the solver has a primal solution in result index result available to query, otherwise return false.

See also value and result_count.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
 
@@ -1090,25 +1090,25 @@
 julia> optimize!(model)
 
 julia> has_values(model)
-true
source

in_set_string

JuMP.in_set_stringFunction
in_set_string(mode::MIME, set)

Return a String representing the membership to the set set using print mode mode.

Extensions

JuMP extensions may extend this method for new set types to improve the legibility of their printing.

Example

julia> in_set_string(MIME("text/plain"), MOI.Interval(1.0, 2.0))
-"∈ [1, 2]"
source

index

JuMP.indexFunction
index(cr::ConstraintRef)::MOI.ConstraintIndex

Return the index of the constraint that corresponds to cr in the MOI backend.

Example

julia> model = Model();
+true
source

in_set_string

JuMP.in_set_stringFunction
in_set_string(mode::MIME, set)

Return a String representing the membership to the set set using print mode mode.

Extensions

JuMP extensions may extend this method for new set types to improve the legibility of their printing.

Example

julia> in_set_string(MIME("text/plain"), MOI.Interval(1.0, 2.0))
+"∈ [1, 2]"
source
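
As an illustrative sketch, an extension defining its own scalar set could specialize this method to control how set membership is printed; MyHalfOpenInterval is made up for this example:

struct MyHalfOpenInterval <: MOI.AbstractScalarSet
    lower::Float64
    upper::Float64
end

function JuMP.in_set_string(::MIME"text/plain", set::MyHalfOpenInterval)
    # Printed after the function part of the constraint, e.g. "x ∈ [0.0, 1.0)".
    return "∈ [$(set.lower), $(set.upper))"
end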

index

JuMP.indexFunction
index(cr::ConstraintRef)::MOI.ConstraintIndex

Return the index of the constraint that corresponds to cr in the MOI backend.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
 julia> @constraint(model, c, x >= 0);
 
 julia> index(c)
-MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.GreaterThan{Float64}}(1)
source
index(v::GenericVariableRef)::MOI.VariableIndex

Return the index of the variable that corresponds to v in the MOI backend.

Example

julia> model = Model();
+MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.GreaterThan{Float64}}(1)
source
index(v::GenericVariableRef)::MOI.VariableIndex

Return the index of the variable that corresponds to v in the MOI backend.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
 julia> index(x)
-MOI.VariableIndex(1)
source

is_binary

JuMP.is_binaryFunction
is_binary(v::GenericVariableRef)

Return true if v is constrained to be binary.

See also BinaryRef, set_binary, unset_binary.

Example

julia> model = Model();
+MOI.VariableIndex(1)
source

is_binary

JuMP.is_binaryFunction
is_binary(v::GenericVariableRef)

Return true if v is constrained to be binary.

See also BinaryRef, set_binary, unset_binary.

Example

julia> model = Model();
 
 julia> @variable(model, x, Bin);
 
 julia> is_binary(x)
-true
source

is_fixed

JuMP.is_fixedFunction
is_fixed(v::GenericVariableRef)

Return true if v is a fixed variable. If true, the fixed value can be queried with fix_value.

See also FixRef, fix_value, fix, unfix.

Example

julia> model = Model();
+true
source

is_fixed

JuMP.is_fixedFunction
is_fixed(v::GenericVariableRef)

Return true if v is a fixed variable. If true, the fixed value can be queried with fix_value.

See also FixRef, fix_value, fix, unfix.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -1118,7 +1118,7 @@
 julia> fix(x, 1.0)
 
 julia> is_fixed(x)
-true
source

is_integer

JuMP.is_integerFunction
is_integer(v::GenericVariableRef)

Return true if v is constrained to be integer.

See also IntegerRef, set_integer, unset_integer.

Example

julia> model = Model();
+true
source

is_integer

JuMP.is_integerFunction
is_integer(v::GenericVariableRef)

Return true if v is constrained to be integer.

See also IntegerRef, set_integer, unset_integer.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -1128,7 +1128,7 @@
 julia> set_integer(x)
 
 julia> is_integer(x)
-true
source

is_parameter

JuMP.is_parameterFunction
is_parameter(x::GenericVariableRef)::Bool

Return true if x is constrained to be a parameter.

See also ParameterRef, set_parameter_value, parameter_value.

Example

julia> model = Model();
+true
source

is_parameter

JuMP.is_parameterFunction
is_parameter(x::GenericVariableRef)::Bool

Return true if x is constrained to be a parameter.

See also ParameterRef, set_parameter_value, parameter_value.

Example

julia> model = Model();
 
 julia> @variable(model, p in Parameter(2))
 p
@@ -1140,7 +1140,7 @@
 x
 
 julia> is_parameter(x)
-false
source

is_solved_and_feasible

JuMP.is_solved_and_feasibleFunction
is_solved_and_feasible(
+false
source

is_solved_and_feasible

JuMP.is_solved_and_feasibleFunction
is_solved_and_feasible(
     model::GenericModel;
     allow_local::Bool = true,
     allow_almost::Bool = false,
@@ -1151,7 +1151,7 @@
 julia> model = Model(Ipopt.Optimizer);
 
 julia> is_solved_and_feasible(model)
-false
source

is_valid

JuMP.is_validFunction
is_valid(model::GenericModel, con_ref::ConstraintRef{<:AbstractModel})

Return true if con_ref refers to a valid constraint in model.

Example

julia> model = Model();
+false
source

is_valid

JuMP.is_validFunction
is_valid(model::GenericModel, con_ref::ConstraintRef{<:AbstractModel})

Return true if con_ref refers to a valid constraint in model.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -1163,7 +1163,7 @@
 julia> model_2 = Model();
 
 julia> is_valid(model_2, c)
-false
source
is_valid(model::GenericModel, variable_ref::GenericVariableRef)

Return true if variable_ref refers to a valid variable in model.

Example

julia> model = Model();
+false
source
is_valid(model::GenericModel, variable_ref::GenericVariableRef)

Return true if variable_ref refers to a valid variable in model.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -1173,7 +1173,7 @@
 julia> model_2 = Model();
 
 julia> is_valid(model_2, x)
-false
source

isequal_canonical

JuMP.isequal_canonicalFunction
isequal_canonical(
+false
source

isequal_canonical

JuMP.isequal_canonicalFunction
isequal_canonical(
     x::T,
     y::T
 ) where {T<:AbstractJuMPScalar,AbstractArray{<:AbstractJuMPScalar}}

Return true if x is equal to y after dropping zeros and disregarding the order.

This method is mainly useful for testing, because fallbacks like x == y do not account for valid mathematical comparisons like x[1] + 0 x[2] + 1 == x[1] + 1.

Example

julia> model = Model();
@@ -1193,7 +1193,7 @@
 false
 
 julia> isequal_canonical(a, b)
-true
source

jump_function

JuMP.jump_functionFunction
jump_function(model::AbstractModel, x::MOI.AbstractFunction)

Given a MathOptInterface object x, return the JuMP equivalent.

See also: moi_function.

Example

julia> model = Model();
+true
source

jump_function

JuMP.jump_functionFunction
jump_function(model::AbstractModel, x::MOI.AbstractFunction)

Given a MathOptInterface object x, return the JuMP equivalent.

See also: moi_function.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -1201,10 +1201,10 @@
 1.0 + 2.0 MOI.VariableIndex(1)
 
 julia> jump_function(model, f)
-2 x + 1
source

jump_function_type

JuMP.jump_function_typeFunction
jump_function_type(model::AbstractModel, ::Type{T}) where {T}

Given a MathOptInterface object type T, return the JuMP equivalent.

See also: moi_function_type.

Example

julia> model = Model();
+2 x + 1
source

jump_function_type

JuMP.jump_function_typeFunction
jump_function_type(model::AbstractModel, ::Type{T}) where {T}

Given a MathOptInterface object type T, return the JuMP equivalent.

See also: moi_function_type.

Example

julia> model = Model();
 
 julia> jump_function_type(model, MOI.ScalarAffineFunction{Float64})
-AffExpr (alias for GenericAffExpr{Float64, GenericVariableRef{Float64}})
source

latex_formulation

JuMP.latex_formulationFunction
latex_formulation(model::AbstractModel)

Wrap model in a type so that it can be pretty-printed as text/latex in a notebook like IJulia, or in Documenter.

To render the model, end the cell with latex_formulation(model), or call display(latex_formulation(model)) to force the display of the model from inside a function.

source

linear_terms

JuMP.linear_termsFunction
linear_terms(aff::GenericAffExpr{C,V})

Provides an iterator over coefficient-variable tuples (a_i::C, x_i::V) in the linear part of the affine expression.

source
linear_terms(quad::GenericQuadExpr{C,V})

Provides an iterator over tuples (coefficient::C, variable::V) in the linear part of the quadratic expression.

source

list_of_constraint_types

JuMP.list_of_constraint_typesFunction
list_of_constraint_types(model::GenericModel)::Vector{Tuple{Type,Type}}

Return a list of tuples of the form (F, S) where F is a JuMP function type and S is an MOI set type such that all_constraints(model, F, S) returns a nonempty list.

Example

julia> model = Model();
+AffExpr (alias for GenericAffExpr{Float64, GenericVariableRef{Float64}})
source

latex_formulation

JuMP.latex_formulationFunction
latex_formulation(model::AbstractModel)

Wrap model in a type so that it can be pretty-printed as text/latex in a notebook like IJulia, or in Documenter.

To render the model, end the cell with latex_formulation(model), or call display(latex_formulation(model)) to force the display of the model from inside a function.

source
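
A minimal usage sketch; the rendered output is LaTeX and depends on the display backend, so it is not reproduced here:

julia> model = Model();

julia> @variable(model, x >= 0);

julia> @objective(model, Min, 2 * x);

julia> latex_formulation(model);  # end a notebook cell with this call (without the `;`) to render the model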

linear_terms

JuMP.linear_termsFunction
linear_terms(aff::GenericAffExpr{C,V})

Provides an iterator over coefficient-variable tuples (a_i::C, x_i::V) in the linear part of the affine expression.

source
linear_terms(quad::GenericQuadExpr{C,V})

Provides an iterator over tuples (coefficient::C, variable::V) in the linear part of the quadratic expression.

source
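
For illustration, a small sketch of iterating over the linear terms of an affine expression; the order shown reflects insertion order and is not a documented guarantee:

julia> model = Model();

julia> @variable(model, x[1:2]);

julia> expr = 2.0 * x[1] + 3.0 * x[2] + 4.0;

julia> for (coef, var) in linear_terms(expr)
           println(coef, " * ", var)
       end
2.0 * x[1]
3.0 * x[2]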

list_of_constraint_types

JuMP.list_of_constraint_typesFunction
list_of_constraint_types(model::GenericModel)::Vector{Tuple{Type,Type}}

Return a list of tuples of the form (F, S) where F is a JuMP function type and S is an MOI set type such that all_constraints(model, F, S) returns a nonempty list.

Example

julia> model = Model();
 
 julia> @variable(model, x >= 0, Bin);
 
@@ -1214,12 +1214,12 @@
 3-element Vector{Tuple{Type, Type}}:
  (AffExpr, MathOptInterface.LessThan{Float64})
  (VariableRef, MathOptInterface.GreaterThan{Float64})
- (VariableRef, MathOptInterface.ZeroOne)

Performance considerations

Iterating over the list of function and set types is a type-unstable operation. Consider using a function barrier. See the Performance tips for extensions section of the documentation for more details.

source

lower_bound

JuMP.lower_boundFunction
lower_bound(v::GenericVariableRef)

Return the lower bound of a variable. Error if one does not exist.

See also LowerBoundRef, has_lower_bound, set_lower_bound, delete_lower_bound.

Example

julia> model = Model();
+ (VariableRef, MathOptInterface.ZeroOne)

Performance considerations

Iterating over the list of function and set types is a type-unstable operation. Consider using a function barrier. See the Performance tips for extensions section of the documentation for more details.

source
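
A sketch of the function-barrier pattern mentioned in the performance note above; summarize_constraints is an illustrative name, not part of JuMP:

# Type-unstable outer loop: F and S are only known at runtime.
function summarize_constraints(model)
    for (F, S) in list_of_constraint_types(model)
        summarize_constraints(model, F, S)  # function barrier
    end
    return
end

# Type-stable inner method: F and S are compile-time types here.
function summarize_constraints(model, ::Type{F}, ::Type{S}) where {F,S}
    println(num_constraints(model, F, S), " constraint(s) of type ", F, "-in-", S)
    return
end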

lower_bound

JuMP.lower_boundFunction
lower_bound(v::GenericVariableRef)

Return the lower bound of a variable. Error if one does not exist.

See also LowerBoundRef, has_lower_bound, set_lower_bound, delete_lower_bound.

Example

julia> model = Model();
 
 julia> @variable(model, x >= 1.0);
 
 julia> lower_bound(x)
-1.0
source

lp_matrix_data

JuMP.lp_matrix_dataFunction
lp_matrix_data(model::GenericModel{T})

Given a JuMP model of a linear program, return an LPMatrixData{T} struct storing data for an equivalent linear program in the form:

\[\begin{aligned}
\min & c^\top x + c_0 \\
& b_l \le A x \le b_u \\
& x_l \le x \le x_u
\end{aligned}\]

@@ -1264,7 +1264,7 @@
 0.0

 julia> data.sense
-MAX_SENSE::OptimizationSense = 1
source

lp_sensitivity_report

JuMP.lp_sensitivity_reportFunction
lp_sensitivity_report(model::GenericModel{T}; atol::T = Base.rtoldefault(T))::SensitivityReport{T} where {T}

Given a linear program model with a current optimal basis, return a SensitivityReport object, which maps:

  • Every variable reference to a tuple (d_lo, d_hi)::Tuple{T,T}, explaining how much the objective coefficient of the corresponding variable can change by, such that the original basis remains optimal.
  • Every constraint reference to a tuple (d_lo, d_hi)::Tuple{T,T}, explaining how much the right-hand side of the corresponding constraint can change by, such that the basis remains optimal.

Both tuples are relative, rather than absolute. So given an objective coefficient of 1.0 and a tuple (-0.5, 0.5), the objective coefficient can range between 1.0 - 0.5 and 1.0 + 0.5.

atol is the primal/dual optimality tolerance, and should match the tolerance of the solver used to compute the basis.

Note: interval constraints are NOT supported.

Example

julia> import HiGHS
+MAX_SENSE::OptimizationSense = 1
source

lp_sensitivity_report

JuMP.lp_sensitivity_reportFunction
lp_sensitivity_report(model::GenericModel{T}; atol::T = Base.rtoldefault(T))::SensitivityReport{T} where {T}

Given a linear program model with a current optimal basis, return a SensitivityReport object, which maps:

  • Every variable reference to a tuple (d_lo, d_hi)::Tuple{T,T}, explaining how much the objective coefficient of the corresponding variable can change by, such that the original basis remains optimal.
  • Every constraint reference to a tuple (d_lo, d_hi)::Tuple{T,T}, explaining how much the right-hand side of the corresponding constraint can change by, such that the basis remains optimal.

Both tuples are relative, rather than absolute. So given an objective coefficient of 1.0 and a tuple (-0.5, 0.5), the objective coefficient can range between 1.0 - 0.5 and 1.0 + 0.5.

atol is the primal/dual optimality tolerance, and should match the tolerance of the solver used to compute the basis.

Note: interval constraints are NOT supported.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
 
@@ -1296,7 +1296,7 @@
            "The lower bound of `x` can decrease by $dRHS_lo or increase " *
            "by $dRHS_hi."
        )
-The lower bound of `x` can decrease by -Inf or increase by 3.0.
source

map_coefficients

JuMP.map_coefficientsFunction
map_coefficients(f::Function, a::GenericAffExpr)

Apply f to the coefficients and constant term of a GenericAffExpr a and return a new expression.

See also: map_coefficients_inplace!

Example

julia> model = Model();
+The lower bound of `x` can decrease by -Inf or increase by 3.0.
source

map_coefficients

JuMP.map_coefficientsFunction
map_coefficients(f::Function, a::GenericAffExpr)

Apply f to the coefficients and constant term of a GenericAffExpr a and return a new expression.

See also: map_coefficients_inplace!

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -1307,7 +1307,7 @@
 2 x + 2
 
 julia> a
-x + 1
source
map_coefficients(f::Function, a::GenericQuadExpr)

Apply f to the coefficients and constant term of a GenericQuadExpr a and return a new expression.

See also: map_coefficients_inplace!

Example

julia> model = Model();
+x + 1
source
map_coefficients(f::Function, a::GenericQuadExpr)

Apply f to the coefficients and constant term of a GenericQuadExpr a and return a new expression.

See also: map_coefficients_inplace!

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -1318,7 +1318,7 @@
 2 x² + 2 x + 2
 
 julia> a
-x² + x + 1
source

map_coefficients_inplace!

JuMP.map_coefficients_inplace!Function
map_coefficients_inplace!(f::Function, a::GenericAffExpr)

Apply f to the coefficients and constant term of a GenericAffExpr a and update them in-place.

See also: map_coefficients

Example

julia> model = Model();
+x² + x + 1
source

map_coefficients_inplace!

JuMP.map_coefficients_inplace!Function
map_coefficients_inplace!(f::Function, a::GenericAffExpr)

Apply f to the coefficients and constant term of a GenericAffExpr a and update them in-place.

See also: map_coefficients

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -1329,7 +1329,7 @@
 2 x + 2
 
 julia> a
-2 x + 2
source
map_coefficients_inplace!(f::Function, a::GenericQuadExpr)

Apply f to the coefficients and constant term of a GenericQuadExpr a and update them in-place.

See also: map_coefficients

Example

julia> model = Model();
+2 x + 2
source
map_coefficients_inplace!(f::Function, a::GenericQuadExpr)

Apply f to the coefficients and constant term of a GenericQuadExpr a and update them in-place.

See also: map_coefficients

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -1340,10 +1340,10 @@
 2 x² + 2 x + 2
 
 julia> a
-2 x² + 2 x + 2
source

mode

JuMP.modeFunction
mode(model::GenericModel)

Return the ModelMode of model.

Example

julia> model = Model();
+2 x² + 2 x + 2
source

mode

JuMP.modeFunction
mode(model::GenericModel)

Return the ModelMode of model.

Example

julia> model = Model();
 
 julia> mode(model)
-AUTOMATIC::ModelMode = 0
source

model_convert

JuMP.model_convertFunction
model_convert(
+AUTOMATIC::ModelMode = 0
source

model_convert

JuMP.model_convertFunction
model_convert(
     model::AbstractModel,
     rhs::Union{
         AbstractConstraint,
@@ -1351,14 +1351,14 @@
         AbstractJuMPScalar,
         MOI.AbstractSet,
     },
-)

Convert the coefficients and constants of functions and sets in the rhs to the coefficient type value_type(typeof(model)).

Purpose

Creating and adding a constraint is a two-step process. The first step calls build_constraint, and the result of that is passed to add_constraint.

However, because build_constraint does not take the model as an argument, the coefficients and constants of the function or set might be of a different type than value_type(typeof(model)).

Therefore, the result of build_constraint is converted in a call to model_convert before the result is passed to add_constraint.

source

model_string

JuMP.model_stringFunction
model_string(mode::MIME, model::AbstractModel)

Return a String representation of model given the mode.

Example

julia> model = Model();
+)

Convert the coefficients and constants of functions and sets in the rhs to the coefficient type value_type(typeof(model)).

Purpose

Creating and adding a constraint is a two-step process. The first step calls build_constraint, and the result of that is passed to add_constraint.

However, because build_constraint does not take the model as an argument, the coefficients and constants of the function or set might be of a different type than value_type(typeof(model)).

Therefore, the result of build_constraint is converted in a call to model_convert before the result is passed to add_constraint.

source
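
For illustration only, and assuming the default Model (whose value type is Float64), converting a plain number might look like this; the full set of supported rhs values is given by the signature above:

julia> model = Model();

julia> JuMP.model_convert(model, 1)
1.0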

model_string

JuMP.model_stringFunction
model_string(mode::MIME, model::AbstractModel)

Return a String representation of model given the mode.

Example

julia> model = Model();
 
 julia> @variable(model, x >= 0);
 
 julia> print(model_string(MIME("text/plain"), model))
 Feasibility
 Subject to
- x ≥ 0
source

moi_function

JuMP.moi_functionFunction
moi_function(x::AbstractJuMPScalar)
+ x ≥ 0
source

moi_function

JuMP.moi_functionFunction
moi_function(x::AbstractJuMPScalar)
 moi_function(x::AbstractArray{<:AbstractJuMPScalar})

Given a JuMP object x, return the MathOptInterface equivalent.

See also: jump_function.

Example

julia> model = Model();
 
 julia> @variable(model, x);
@@ -1367,8 +1367,8 @@
 2 x + 1
 
 julia> moi_function(f)
-1.0 + 2.0 MOI.VariableIndex(1)
source

moi_function_type

JuMP.moi_function_typeFunction
moi_function_type(::Type{T}) where {T}

Given a JuMP object type T, return the MathOptInterface equivalent.

See also: jump_function_type.

Example

julia> moi_function_type(AffExpr)
-MathOptInterface.ScalarAffineFunction{Float64}
source

moi_set

JuMP.moi_setFunction
moi_set(constraint::AbstractConstraint)

Return the set of the constraint constraint in the function-in-set form as a MathOptInterface.AbstractSet.

moi_set(s::AbstractVectorSet, dim::Int)

Returns the MOI set of dimension dim corresponding to the JuMP set s.

moi_set(s::AbstractScalarSet)

Returns the MOI set corresponding to the JuMP set s.

source

name

JuMP.nameFunction
name(con_ref::ConstraintRef)

Get a constraint's name attribute.

Example

julia> model = Model();
+1.0 + 2.0 MOI.VariableIndex(1)
source

moi_function_type

JuMP.moi_function_typeFunction
moi_function_type(::Type{T}) where {T}

Given a JuMP object type T, return the MathOptInterface equivalent.

See also: jump_function_type.

Example

julia> moi_function_type(AffExpr)
+MathOptInterface.ScalarAffineFunction{Float64}
source

moi_set

JuMP.moi_setFunction
moi_set(constraint::AbstractConstraint)

Return the set of the constraint constraint in the function-in-set form as a MathOptInterface.AbstractSet.

moi_set(s::AbstractVectorSet, dim::Int)

Returns the MOI set of dimension dim corresponding to the JuMP set s.

moi_set(s::AbstractScalarSet)

Returns the MOI set corresponding to the JuMP set s.

source
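
A small usage sketch (not from the docstring), assuming a constraint built with the @constraint macro; constraint_object is used to recover the AbstractConstraint before querying its set:

julia> model = Model();

julia> @variable(model, x);

julia> c = @constraint(model, 2x <= 1);

julia> moi_set(constraint_object(c))
MathOptInterface.LessThan{Float64}(1.0)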

name

JuMP.nameFunction
name(con_ref::ConstraintRef)

Get a constraint's name attribute.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -1376,7 +1376,7 @@
 c : [2 x] ∈ Nonnegatives()
 
 julia> name(c)
-"c"
source
name(v::GenericVariableRef)::String

Get a variable's name attribute.

Example

julia> model = Model();
+"c"
source
name(v::GenericVariableRef)::String

Get a variable's name attribute.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2])
 2-element Vector{VariableRef}:
@@ -1384,10 +1384,10 @@
  x[2]
 
 julia> name(x[1])
-"x[1]"
source
name(model::AbstractModel)

Return the MOI.Name attribute of model's backend, or a default if empty.

Example

julia> model = Model();
+"x[1]"
source
name(model::AbstractModel)

Return the MOI.Name attribute of model's backend, or a default if empty.

Example

julia> model = Model();
 
 julia> name(model)
-"A JuMP Model"
source

node_count

JuMP.node_countFunction
node_count(model::GenericModel)

If available, returns the total number of branch-and-bound nodes explored during the most recent optimization in a Mixed Integer Program (the MOI.NodeCount attribute).

Throws a MOI.GetAttributeNotAllowed error if the attribute is not implemented by the solver.

Example

julia> import HiGHS
+"A JuMP Model"
source

node_count

JuMP.node_countFunction
node_count(model::GenericModel)

If available, returns the total number of branch-and-bound nodes explored during the most recent optimization in a Mixed Integer Program (the MOI.NodeCount attribute).

Throws a MOI.GetAttributeNotAllowed error if the attribute is not implemented by the solver.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
 
@@ -1396,7 +1396,7 @@
 julia> optimize!(model)
 
 julia> node_count(model)
-0
source

normalized_coefficient

JuMP.normalized_coefficientFunction
normalized_coefficient(
+0
source

normalized_coefficient

JuMP.normalized_coefficientFunction
normalized_coefficient(
     constraint::ConstraintRef,
     variable::GenericVariableRef,
 )

Return the coefficient associated with variable in constraint after JuMP has normalized the constraint into its standard form.

See also set_normalized_coefficient.

Example

julia> model = Model();
@@ -1416,7 +1416,7 @@
 julia> normalized_coefficient(con_vec, x)
 2-element Vector{Tuple{Int64, Float64}}:
  (1, 1.0)
- (2, 2.0)
source
normalized_coefficient(
+ (2, 2.0)
source
normalized_coefficient(
     constraint::ConstraintRef,
     variable_1::GenericVariableRef,
     variable_2::GenericVariableRef,
@@ -1441,7 +1441,7 @@
  (1, 1.0)
 
 julia> normalized_coefficient(con_vec, x[1], x[2])
-Tuple{Int64, Float64}[]
source

normalized_rhs

JuMP.normalized_rhsFunction
normalized_rhs(constraint::ConstraintRef)

Return the right-hand side term of constraint after JuMP has converted the constraint into its normalized form.

See also set_normalized_rhs.

Example

julia> model = Model();
+Tuple{Int64, Float64}[]
source

normalized_rhs

JuMP.normalized_rhsFunction
normalized_rhs(constraint::ConstraintRef)

Return the right-hand side term of constraint after JuMP has converted the constraint into its normalized form.

See also set_normalized_rhs.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -1449,7 +1449,7 @@
 con : 2 x ≤ 1
 
 julia> normalized_rhs(con)
-1.0
source

num_constraints

JuMP.num_constraintsFunction
num_constraints(model::GenericModel, function_type, set_type)::Int64

Return the number of constraints currently in the model where the function has type function_type and the set has type set_type.

See also list_of_constraint_types and all_constraints.

Example

julia> model = Model();
+1.0
source

num_constraints

JuMP.num_constraintsFunction
num_constraints(model::GenericModel, function_type, set_type)::Int64

Return the number of constraints currently in the model where the function has type function_type and the set has type set_type.

See also list_of_constraint_types and all_constraints.

Example

julia> model = Model();
 
 julia> @variable(model, x >= 0, Bin);
 
@@ -1468,7 +1468,7 @@
 1
 
 julia> num_constraints(model, AffExpr, MOI.LessThan{Float64})
-2
source
num_constraints(model::GenericModel; count_variable_in_set_constraints::Bool)

Return the number of constraints in model.

If count_variable_in_set_constraints == true, then VariableRef constraints such as VariableRef-in-Integer are included. To count only the number of structural constraints (for example, the rows in the constraint matrix of a linear program), pass count_variable_in_set_constraints = false.

Example

julia> model = Model();
+2
source
num_constraints(model::GenericModel; count_variable_in_set_constraints::Bool)

Return the number of constraints in model.

If count_variable_in_set_constraints == true, then VariableRef constraints such as VariableRef-in-Integer are included. To count only the number of structural constraints (for example, the rows in the constraint matrix of a linear program), pass count_variable_in_set_constraints = false.

Example

julia> model = Model();
 
 julia> @variable(model, x >= 0, Int);
 
@@ -1478,18 +1478,18 @@
 3
 
 julia> num_constraints(model; count_variable_in_set_constraints = false)
-1
source

num_variables

JuMP.num_variablesFunction
num_variables(model::GenericModel)::Int64

Returns the number of variables in model.

Example

julia> model = Model();
+1
source

num_variables

JuMP.num_variablesFunction
num_variables(model::GenericModel)::Int64

Returns the number of variables in model.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2]);
 
 julia> num_variables(model)
-2
source

object_dictionary

JuMP.object_dictionaryFunction
object_dictionary(model::GenericModel)

Return the dictionary that maps the symbol name of a variable, constraint, or expression to the corresponding object.

Objects are registered to a specific symbol in the macros. For example, @variable(model, x[1:2, 1:2]) registers the array of variables x to the symbol :x.

This method should be defined for any subtype of AbstractModel.

See also: unregister.

Example

julia> model = Model();
+2
source

object_dictionary

JuMP.object_dictionaryFunction
object_dictionary(model::GenericModel)

Return the dictionary that maps the symbol name of a variable, constraint, or expression to the corresponding object.

Objects are registered to a specific symbol in the macros. For example, @variable(model, x[1:2, 1:2]) registers the array of variables x to the symbol :x.

This method should be defined for any subtype of AbstractModel.

See also: unregister.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2]);
 
 julia> object_dictionary(model)
 Dict{Symbol, Any} with 1 entry:
-  :x => VariableRef[x[1], x[2]]
source

objective_bound

JuMP.objective_boundFunction
objective_bound(model::GenericModel)

Return the best known bound on the optimal objective value after a call to optimize!(model).

For scalar-valued objectives, this function returns a Float64. For vector-valued objectives, it returns a Vector{Float64}.

In the case of a vector-valued objective, this returns the ideal point, that is, the point obtained if each objective was optimized independently.

This function is equivalent to querying the MOI.ObjectiveBound attribute.

Example

julia> import HiGHS
+  :x => VariableRef[x[1], x[2]]
source

objective_bound

JuMP.objective_boundFunction
objective_bound(model::GenericModel)

Return the best known bound on the optimal objective value after a call to optimize!(model).

For scalar-valued objectives, this function returns a Float64. For vector-valued objectives, it returns a Vector{Float64}.

In the case of a vector-valued objective, this returns the ideal point, that is, the point obtained if each objective was optimized independently.

This function is equivalent to querying the MOI.ObjectiveBound attribute.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
 
@@ -1502,7 +1502,7 @@
 julia> optimize!(model)
 
 julia> objective_bound(model)
-3.0
source

objective_function

JuMP.objective_functionFunction
objective_function(
+3.0
source

objective_function

JuMP.objective_functionFunction
objective_function(
     model::GenericModel,
     ::Type{F} = objective_function_type(model),
 ) where {F}

Return an object of type F representing the objective function.

Errors if the objective is not convertible to type F.

This function is equivalent to querying the MOI.ObjectiveFunction{F} attribute.

Example

julia> model = Model();
@@ -1522,21 +1522,21 @@
 julia> typeof(objective_function(model, QuadExpr))
 QuadExpr (alias for GenericQuadExpr{Float64, GenericVariableRef{Float64}})

The last two commands show that, even though the objective function is affine, it is convertible to a quadratic function, so it can be queried as a quadratic function and the result is quadratic.

However, it is not convertible to a variable:

julia> objective_function(model, VariableRef)
 ERROR: InexactError: convert(MathOptInterface.VariableIndex, 1.0 + 2.0 MOI.VariableIndex(1))
-[...]
source

objective_function_string

JuMP.objective_function_stringFunction
objective_function_string(mode, model::AbstractModel)::String

Return a String describing the objective function of the model.

Example

julia> model = Model();
+[...]
source

objective_function_string

JuMP.objective_function_stringFunction
objective_function_string(mode, model::AbstractModel)::String

Return a String describing the objective function of the model.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
 julia> @objective(model, Min, 2 * x);
 
 julia> objective_function_string(MIME("text/plain"), model)
-"2 x"
source

objective_function_type

JuMP.objective_function_typeFunction
objective_function_type(model::GenericModel)::AbstractJuMPScalar

Return the type of the objective function.

This function is equivalent to querying the MOI.ObjectiveFunctionType attribute.

Example

julia> model = Model();
+"2 x"
source

objective_function_type

JuMP.objective_function_typeFunction
objective_function_type(model::GenericModel)::AbstractJuMPScalar

Return the type of the objective function.

This function is equivalent to querying the MOI.ObjectiveFunctionType attribute.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
 julia> @objective(model, Min, 2 * x + 1);
 
 julia> objective_function_type(model)
-AffExpr (alias for GenericAffExpr{Float64, GenericVariableRef{Float64}})
source

objective_sense

JuMP.objective_senseFunction
objective_sense(model::GenericModel)::MOI.OptimizationSense

Return the objective sense.

This function is equivalent to querying the MOI.ObjectiveSense attribute.

Example

julia> model = Model();
+AffExpr (alias for GenericAffExpr{Float64, GenericVariableRef{Float64}})
source

objective_sense

JuMP.objective_senseFunction
objective_sense(model::GenericModel)::MOI.OptimizationSense

Return the objective sense.

This function is equivalent to querying the MOI.ObjectiveSense attribute.

Example

julia> model = Model();
 
 julia> objective_sense(model)
 FEASIBILITY_SENSE::OptimizationSense = 2
@@ -1547,7 +1547,7 @@
 x
 
 julia> objective_sense(model)
-MAX_SENSE::OptimizationSense = 1
source

objective_value

JuMP.objective_valueFunction
objective_value(model::GenericModel; result::Int = 1)

Return the objective value associated with result index result of the most-recent solution returned by the solver.

For scalar-valued objectives, this function returns a Float64. For vector-valued objectives, it returns a Vector{Float64}.

This function is equivalent to querying the MOI.ObjectiveValue attribute.

See also: result_count.

Example

julia> import HiGHS
+MAX_SENSE::OptimizationSense = 1
source

objective_value

JuMP.objective_valueFunction
objective_value(model::GenericModel; result::Int = 1)

Return the objective value associated with result index result of the most-recent solution returned by the solver.

For scalar-valued objectives, this function returns a Float64. For vector-valued objectives, it returns a Vector{Float64}.

This function is equivalent to querying the MOI.ObjectiveValue attribute.

See also: result_count.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
 
@@ -1565,7 +1565,7 @@
 julia> objective_value(model; result = 2)
 ERROR: Result index of attribute MathOptInterface.ObjectiveValue(2) out of bounds. There are currently 1 solution(s) in the model.
 Stacktrace:
-[...]
source

op_ifelse

JuMP.op_ifelseFunction
op_ifelse(a, x, y)

A function that falls back to ifelse(a, x, y), but when called with a JuMP variable or expression in the first argument, returns a GenericNonlinearExpr.

Example

julia> model = Model();
+[...]
source

op_ifelse

JuMP.op_ifelseFunction
op_ifelse(a, x, y)

A function that falls back to ifelse(a, x, y), but when called with a JuMP variable or expression in the first argument, returns a GenericNonlinearExpr.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -1576,14 +1576,14 @@
 ifelse(x, 1.0, 2.0)
 
 julia> op_ifelse(true, x, 2.0)
-x
source

op_string

JuMP.op_stringFunction
op_string(mime::MIME, x::GenericNonlinearExpr, ::Val{op}) where {op}

Return the string that should be printed for the operator op when function_string is called with mime and x.

Example

julia> model = Model();
+x
source

op_string

JuMP.op_stringFunction
op_string(mime::MIME, x::GenericNonlinearExpr, ::Val{op}) where {op}

Return the string that should be printed for the operator op when function_string is called with mime and x.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2], Bin);
 
 julia> f = @expression(model, x[1] || x[2]);
 
 julia> op_string(MIME("text/plain"), f, Val(:||))
-"||"
source

operator_to_set

JuMP.operator_to_setFunction
operator_to_set(error_fn::Function, ::Val{sense_symbol})

Converts a sense symbol to a set set such that @constraint(model, func sense_symbol 0) is equivalent to @constraint(model, func in set) for any func::AbstractJuMPScalar.

Example

Once a custom set is defined you can directly create a JuMP constraint with it:

julia> struct CustomSet{T} <: MOI.AbstractScalarSet
+"||"
source

operator_to_set

JuMP.operator_to_setFunction
operator_to_set(error_fn::Function, ::Val{sense_symbol})

Converts a sense symbol to a set set such that @constraint(model, func sense_symbol 0) is equivalent to @constraint(model, func in set) for any func::AbstractJuMPScalar.

Example

Once a custom set is defined you can directly create a JuMP constraint with it:

julia> struct CustomSet{T} <: MOI.AbstractScalarSet
            value::T
        end
 
@@ -1602,8 +1602,8 @@
 julia> MOIU.shift_constant(set::CustomSet, value) = CustomSet(set.value + value)
 
 julia> cref = @constraint(model, x ⊰ 1)
-x ∈ CustomSet{Float64}(1.0)

Note that the whole function is first moved to the right-hand side, then the sign is transformed into a set with zero constant and finally the constant is moved to the set with MOIU.shift_constant.

source

operator_warn

JuMP.operator_warnFunction
operator_warn(model::AbstractModel)
-operator_warn(model::GenericModel)

This function is called on the model whenever two affine expressions are added together without using destructive_add!, and at least one of the two expressions has more than 50 terms.

For the case of Model, if this function is called more than 20,000 times then a warning is generated once.

This method should only be implemented by developers creating JuMP extensions. It should never be called by users of JuMP.

source

optimize!

JuMP.optimize!Function
optimize!(
+x ∈ CustomSet{Float64}(1.0)

Note that the whole function is first moved to the right-hand side, then the sign is transformed into a set with zero constant and finally the constant is moved to the set with MOIU.shift_constant.

source

operator_warn

JuMP.operator_warnFunction
operator_warn(model::AbstractModel)
+operator_warn(model::GenericModel)

This function is called on the model whenever two affine expressions are added together without using destructive_add!, and at least one of the two expressions has more than 50 terms.

For the case of Model, if this function is called more than 20,000 times then a warning is generated once.

This method should only be implemented by developers creating JuMP extensions. It should never be called by users of JuMP.

source

optimize!

JuMP.optimize!Function
optimize!(
     model::GenericModel;
     ignore_optimize_hook = (model.optimize_hook === nothing),
     kwargs...,
@@ -1623,7 +1623,7 @@
 my_optimize_hook (generic function with 1 method)
 
 julia> optimize!(model; foo = 2)
-Hook called with foo = 2
source

optimizer_index

JuMP.optimizer_indexFunction
optimizer_index(x::GenericVariableRef)::MOI.VariableIndex
+Hook called with foo = 2
source

optimizer_index

JuMP.optimizer_indexFunction
optimizer_index(x::GenericVariableRef)::MOI.VariableIndex
 optimizer_index(x::ConstraintRef{<:GenericModel})::MOI.ConstraintIndex

Return the variable or constraint index that corresponds to x in the associated model unsafe_backend(owner_model(x)).

This function should be used with unsafe_backend.

As a safer alternative, use backend and index. See the docstrings of backend and unsafe_backend for more details.

Throws

  • Throws NoOptimizer if no optimizer is set.
  • Throws an ErrorException if the optimizer is set but is not attached.
  • Throws an ErrorException if the index is bridged.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
@@ -1639,7 +1639,7 @@
 A HiGHS model with 1 columns and 0 rows.
 
 julia> optimizer_index(x)
-MOI.VariableIndex(1)
source

optimizer_with_attributes

JuMP.optimizer_with_attributesFunction
optimizer_with_attributes(optimizer_constructor, attrs::Pair...)

Groups an optimizer constructor with the list of attributes attrs. Note that it is equivalent to MOI.OptimizerWithAttributes.

When provided to the Model constructor or to set_optimizer, it creates an optimizer by calling optimizer_constructor(), and then sets the attributes using set_attribute.

See also: set_attribute, get_attribute.

Note

The string names of the attributes are specific to each solver. One should consult the solver's documentation to find the attributes of interest.

Example

julia> import HiGHS
+MOI.VariableIndex(1)
source

optimizer_with_attributes

JuMP.optimizer_with_attributesFunction
optimizer_with_attributes(optimizer_constructor, attrs::Pair...)

Groups an optimizer constructor with the list of attributes attrs. Note that it is equivalent to MOI.OptimizerWithAttributes.

When provided to the Model constructor or to set_optimizer, it creates an optimizer by calling optimizer_constructor(), and then sets the attributes using set_attribute.

See also: set_attribute, get_attribute.

Note

The string names of the attributes are specific to each solver. One should consult the solver's documentation to find the attributes of interest.

Example

julia> import HiGHS
 
 julia> optimizer = optimizer_with_attributes(
            HiGHS.Optimizer, "presolve" => "off", MOI.Silent() => true,
@@ -1651,12 +1651,12 @@
 
 julia> set_attribute(model, "presolve", "off")
 
-julia> set_attribute(model, MOI.Silent(), true)
source

owner_model

JuMP.owner_modelFunction
owner_model(s::AbstractJuMPScalar)

Return the model owning the scalar s.

Example

julia> model = Model();
+julia> set_attribute(model, MOI.Silent(), true)
source

owner_model

JuMP.owner_modelFunction
owner_model(s::AbstractJuMPScalar)

Return the model owning the scalar s.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
 julia> owner_model(x) === model
-true
source

parameter_value

JuMP.parameter_valueFunction
parameter_value(x::GenericVariableRef)

Return the value of the parameter x.

Errors if x is not a parameter.

See also ParameterRef, is_parameter, set_parameter_value.

Example

julia> model = Model();
+true
source

parameter_value

JuMP.parameter_valueFunction
parameter_value(x::GenericVariableRef)

Return the value of the parameter x.

Errors if x is not a parameter.

See also ParameterRef, is_parameter, set_parameter_value.

Example

julia> model = Model();
 
 julia> @variable(model, p in Parameter(2))
 p
@@ -1667,23 +1667,23 @@
 julia> set_parameter_value(p, 2.5)
 
 julia> parameter_value(p)
-2.5
source

parse_constraint

JuMP.parse_constraintFunction
parse_constraint(error_fn::Function, expr::Expr)

The entry-point for all constraint-related parsing.

Arguments

  • The error_fn function is passed everywhere to provide better error messages
  • expr comes from the @constraint macro. There are two possibilities:
    • @constraint(model, expr)
    • @constraint(model, name[args], expr)
    In both cases, expr is the main component of the constraint.

Supported syntax

JuMP currently supports the following expr objects:

  • lhs <= rhs
  • lhs == rhs
  • lhs >= rhs
  • l <= body <= u
  • u >= body >= l
  • lhs ⟂ rhs
  • lhs in rhs
  • lhs ∈ rhs
  • z --> {constraint}
  • !z --> {constraint}
  • z <--> {constraint}
  • !z <--> {constraint}
  • z => {constraint}
  • !z => {constraint}

as well as all broadcasted variants.

Extensions

The infrastructure behind parse_constraint is extendable. See parse_constraint_head and parse_constraint_call for details.

source

parse_constraint_call

JuMP.parse_constraint_callFunction
parse_constraint_call(
+2.5
source

parse_constraint

JuMP.parse_constraintFunction
parse_constraint(error_fn::Function, expr::Expr)

The entry-point for all constraint-related parsing.

Arguments

  • The error_fn function is passed everywhere to provide better error messages
  • expr comes from the @constraint macro. There are two possibilities:
    • @constraint(model, expr)
    • @constraint(model, name[args], expr)
    In both cases, expr is the main component of the constraint.

Supported syntax

JuMP currently supports the following expr objects:

  • lhs <= rhs
  • lhs == rhs
  • lhs >= rhs
  • l <= body <= u
  • u >= body >= l
  • lhs ⟂ rhs
  • lhs in rhs
  • lhs ∈ rhs
  • z --> {constraint}
  • !z --> {constraint}
  • z <--> {constraint}
  • !z <--> {constraint}
  • z => {constraint}
  • !z => {constraint}

as well as all broadcasted variants.

Extensions

The infrastructure behind parse_constraint is extendable. See parse_constraint_head and parse_constraint_call for details.

source
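
A brief usage sketch (not from the docstring) exercising two of the syntaxes listed above, the broadcasted lhs >= rhs form and the z --> {constraint} indicator form; the variable names are illustrative:

julia> model = Model();

julia> @variable(model, x[1:2]);

julia> @variable(model, z, Bin);

julia> @constraint(model, x .>= 0);

julia> @constraint(model, z --> {x[1] + x[2] <= 1});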

parse_constraint_call

JuMP.parse_constraint_callFunction
parse_constraint_call(
     error_fn::Function,
     is_vectorized::Bool,
     ::Val{op},
     args...,
-)

Implement this method to intercept the parsing of a :call expression with operator op.

Warning

Extending the constraint macro at parse time is an advanced operation and has the potential to interfere with existing JuMP syntax. Please discuss with the developer chatroom before publishing any code that implements these methods.

Arguments

  • error_fn: a function that accepts a String and throws the string as an error, along with some descriptive information of the macro from which it was thrown.
  • is_vectorized: a boolean to indicate if op should be broadcast or not
  • op: the first element of the .args field of the Expr to intercept
  • args...: the .args field of the Expr.

Returns

This function must return:

  • parse_code::Expr: an expression containing any setup or rewriting code that needs to be called before build_constraint
  • build_code::Expr: an expression that calls build_constraint( or build_constraint.( depending on is_vectorized.

See also: parse_constraint_head, build_constraint

source
parse_constraint_call(
+)

Implement this method to intercept the parsing of a :call expression with operator op.

Warning

Extending the constraint macro at parse time is an advanced operation and has the potential to interfere with existing JuMP syntax. Please discuss with the developer chatroom before publishing any code that implements these methods.

Arguments

  • error_fn: a function that accepts a String and throws the string as an error, along with some descriptive information of the macro from which it was thrown.
  • is_vectorized: a boolean to indicate if op should be broadcast or not
  • op: the first element of the .args field of the Expr to intercept
  • args...: the .args field of the Expr.

Returns

This function must return:

  • parse_code::Expr: an expression containing any setup or rewriting code that needs to be called before build_constraint
  • build_code::Expr: an expression that calls build_constraint( or build_constraint.( depending on is_vectorized.

See also: parse_constraint_head, build_constraint

source
parse_constraint_call(
     error_fn::Function,
     vectorized::Bool,
     ::Val{op},
     lhs,
     rhs,
-) where {op}

Fallback handler for binary operators. These might be infix operators like @constraint(model, lhs op rhs), or normal operators like @constraint(model, op(lhs, rhs)).

In both cases, we rewrite as lhs - rhs in operator_to_set(error_fn, op).

See operator_to_set for details.

source

parse_constraint_head

JuMP.parse_constraint_headFunction
parse_constraint_head(error_fn::Function, ::Val{head}, args...)

Implement this method to intercept the parsing of an expression with head head.

Warning

Extending the constraint macro at parse time is an advanced operation and has the potential to interfere with existing JuMP syntax. Please discuss with the developer chatroom before publishing any code that implements these methods.

Arguments

  • error_fn: a function that accepts a String and throws the string as an error, along with some descriptive information of the macro from which it was thrown.
  • head: the .head field of the Expr to intercept
  • args...: the .args field of the Expr.

Returns

This function must return:

  • is_vectorized::Bool: whether the expression represents a broadcasted expression like x .<= 1
  • parse_code::Expr: an expression containing any setup or rewriting code that needs to be called before build_constraint
  • build_code::Expr: an expression that calls build_constraint( or build_constraint.( depending on is_vectorized.

Existing implementations

JuMP currently implements:

  • ::Val{:call}, which forwards calls to parse_constraint_call
  • ::Val{:comparison}, which handles the special case of l <= body <= u.

See also: parse_constraint_call, build_constraint

source

parse_one_operator_variable

JuMP.parse_one_operator_variableFunction
parse_one_operator_variable(
+) where {op}

Fallback handler for binary operators. These might be infix operators like @constraint(model, lhs op rhs), or normal operators like @constraint(model, op(lhs, rhs)).

In both cases, we rewrite as lhs - rhs in operator_to_set(error_fn, op).

See operator_to_set for details.

source

parse_constraint_head

JuMP.parse_constraint_headFunction
parse_constraint_head(error_fn::Function, ::Val{head}, args...)

Implement this method to intercept the parsing of an expression with head head.

Warning

Extending the constraint macro at parse time is an advanced operation and has the potential to interfere with existing JuMP syntax. Please discuss with the developer chatroom before publishing any code that implements these methods.

Arguments

  • error_fn: a function that accepts a String and throws the string as an error, along with some descriptive information of the macro from which it was thrown.
  • head: the .head field of the Expr to intercept
  • args...: the .args field of the Expr.

Returns

This function must return:

  • is_vectorized::Bool: whether the expression represents a broadcasted expression like x .<= 1
  • parse_code::Expr: an expression containing any setup or rewriting code that needs to be called before build_constraint
  • build_code::Expr: an expression that calls build_constraint( or build_constraint.( depending on is_vectorized.

Existing implementations

JuMP currently implements:

  • ::Val{:call}, which forwards calls to parse_constraint_call
  • ::Val{:comparison}, which handles the special case of l <= body <= u.

See also: parse_constraint_call, build_constraint

source

parse_one_operator_variable

JuMP.parse_one_operator_variableFunction
parse_one_operator_variable(
     error_fn::Function,
     info_expr::_VariableInfoExpr,
     sense::Val{S},
     value,
-) where {S}

Update info_expr for a variable expression in the @variable macro of the form variable name S value.

source

parse_ternary_variable

JuMP.parse_ternary_variableFunction
parse_ternary_variable(error_fn, info_expr, lhs_sense, lhs, rhs_sense, rhs)

A hook for JuMP extensions to intercept the parsing of a :comparison expression, which has the form lhs lhs_sense variable rhs_sense rhs.

source

parse_variable

JuMP.parse_variableFunction
parse_variable(error_fn::Function, ::_VariableInfoExpr, args...)

A hook for extensions to intercept the parsing of inequality constraints in the @variable macro.

source

primal_feasibility_report

JuMP.primal_feasibility_reportFunction
primal_feasibility_report(
+) where {S}

Update info_expr for a variable expression in the @variable macro of the form variable name S value.

source

parse_ternary_variable

JuMP.parse_ternary_variableFunction
parse_ternary_variable(error_fn, info_expr, lhs_sense, lhs, rhs_sense, rhs)

A hook for JuMP extensions to intercept the parsing of a :comparison expression, which has the form lhs lhs_sense variable rhs_sense rhs.

source

parse_variable

JuMP.parse_variableFunction
parse_variable(error_fn::Function, ::_VariableInfoExpr, args...)

A hook for extensions to intercept the parsing of inequality constraints in the @variable macro.

source

primal_feasibility_report

JuMP.primal_feasibility_reportFunction
primal_feasibility_report(
     model::GenericModel{T},
     point::AbstractDict{GenericVariableRef{T},T} = _last_primal_solution(model),
     atol::T = zero(T),
@@ -1694,7 +1694,7 @@
 
 julia> primal_feasibility_report(model, Dict(x => 0.2))
 Dict{Any, Float64} with 1 entry:
-  x ≥ 0.5 => 0.3
source
primal_feasibility_report(
+  x ≥ 0.5 => 0.3
source
primal_feasibility_report(
     point::Function,
     model::GenericModel{T};
     atol::T = zero(T),
@@ -1707,30 +1707,30 @@
            return start_value(v)
        end
 Dict{Any, Float64} with 1 entry:
-  x ≤ 1 => 0.3
source

primal_status

JuMP.primal_statusFunction
primal_status(model::GenericModel; result::Int = 1)

Return a MOI.ResultStatusCode describing the status of the most recent primal solution of the solver (that is, the MOI.PrimalStatus attribute) associated with the result index result.

See also: result_count.

Example

julia> import Ipopt
+  x ≤ 1 => 0.3
source

primal_status

JuMP.primal_statusFunction
primal_status(model::GenericModel; result::Int = 1)

Return a MOI.ResultStatusCode describing the status of the most recent primal solution of the solver (that is, the MOI.PrimalStatus attribute) associated with the result index result.

See also: result_count.

Example

julia> import Ipopt
 
 julia> model = Model(Ipopt.Optimizer);
 
 julia> primal_status(model; result = 2)
-NO_SOLUTION::ResultStatusCode = 0
source
JuMP.print_active_bridgesFunction
print_active_bridges([io::IO = stdout,] model::GenericModel)

Print a list of the variable, constraint, and objective bridges that are currently used in the model.

source
print_active_bridges([io::IO = stdout,] model::GenericModel, ::Type{F}) where {F}

Print a list of bridges required for an objective function of type F.

source
print_active_bridges(
+NO_SOLUTION::ResultStatusCode = 0
source
JuMP.print_active_bridgesFunction
print_active_bridges([io::IO = stdout,] model::GenericModel)

Print a list of the variable, constraint, and objective bridges that are currently used in the model.

source
print_active_bridges([io::IO = stdout,] model::GenericModel, ::Type{F}) where {F}

Print a list of bridges required for an objective function of type F.

source
print_active_bridges(
     [io::IO = stdout,]
     model::GenericModel,
     F::Type,
     S::Type{<:MOI.AbstractSet},
-)

Print a list of bridges required for a constraint of type F-in-S.

source
print_active_bridges(
+)

Print a list of bridges required for a constraint of type F-in-S.

source
print_active_bridges(
     [io::IO = stdout,]
     model::GenericModel,
     S::Type{<:MOI.AbstractSet},
-)

Print a list of bridges required to add a variable constrained to the set S.

source
JuMP.print_bridge_graphFunction
 print_bridge_graph([io::IO,] model::GenericModel)

Print the hyper-graph containing all variable, constraint, and objective types that could be obtained by bridging the variables, constraints, and objectives that are present in the model.

Warning

This function is intended for advanced users. If you want to see only the bridges that are currently used, use print_active_bridges instead.

Explanation of output

Each node in the hyper-graph corresponds to a variable, constraint, or objective type.

  • Variable nodes are indicated by [ ]
  • Constraint nodes are indicated by ( )
  • Objective nodes are indicated by | |

The number inside each pair of brackets is an index of the node in the hyper-graph.

Note that this hyper-graph is the full list of possible transformations. When the bridged model is created, we select the shortest hyper-path(s) from this graph, so many nodes may be unused.

For more information, see Legat, B., Dowson, O., Garcia, J., and Lubin, M. (2020). "MathOptInterface: a data structure for mathematical optimization problems." URL: https://arxiv.org/abs/2002.03447

source

quad_terms

JuMP.quad_termsFunction
quad_terms(quad::GenericQuadExpr{C,V})

Provides an iterator over tuples (coefficient::C, var_1::V, var_2::V) in the quadratic part of the quadratic expression.

source

raw_status

JuMP.raw_statusFunction
raw_status(model::GenericModel)

Return the reason why the solver stopped in its own words (that is, the MathOptInterface model attribute MOI.RawStatusString).

Example

julia> import Ipopt
+)

Print a list of bridges required to add a variable constrained to the set S.

source
JuMP.print_bridge_graphFunction
 print_bridge_graph([io::IO,] model::GenericModel)

Print the hyper-graph containing all variable, constraint, and objective types that could be obtained by bridging the variables, constraints, and objectives that are present in the model.

Warning

This function is intended for advanced users. If you want to see only the bridges that are currently used, use print_active_bridges instead.

Explanation of output

Each node in the hyper-graph corresponds to a variable, constraint, or objective type.

  • Variable nodes are indicated by [ ]
  • Constraint nodes are indicated by ( )
  • Objective nodes are indicated by | |

The number inside each pair of brackets is an index of the node in the hyper-graph.

Note that this hyper-graph is the full list of possible transformations. When the bridged model is created, we select the shortest hyper-path(s) from this graph, so many nodes may be unused.

For more information, see Legat, B., Dowson, O., Garcia, J., and Lubin, M. (2020). "MathOptInterface: a data structure for mathematical optimization problems." URL: https://arxiv.org/abs/2002.03447

source

quad_terms

JuMP.quad_termsFunction
quad_terms(quad::GenericQuadExpr{C,V})

Provides an iterator over tuples (coefficient::C, var_1::V, var_2::V) in the quadratic part of the quadratic expression.

source
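
A minimal sketch (not from the docstring) iterating over the quadratic terms of an expression; the term order shown follows insertion order and is illustrative:

julia> model = Model();

julia> @variable(model, x[1:2]);

julia> q = 2 * x[1]^2 + 3 * x[1] * x[2] + 4;

julia> for (coef, var_1, var_2) in quad_terms(q)
           println((coef, var_1, var_2))
       end
(2.0, x[1], x[1])
(3.0, x[1], x[2])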

raw_status

JuMP.raw_statusFunction
raw_status(model::GenericModel)

Return the reason why the solver stopped in its own words (that is, the MathOptInterface model attribute MOI.RawStatusString).

Example

julia> import Ipopt
 
 julia> model = Model(Ipopt.Optimizer);
 
 julia> raw_status(model)
-"optimize not called"
source

read_from_file

JuMP.read_from_fileFunction
read_from_file(
+"optimize not called"
source

read_from_file

JuMP.read_from_fileFunction
read_from_file(
     filename::String;
     format::MOI.FileFormats.FileFormat = MOI.FileFormats.FORMAT_AUTOMATIC,
     kwargs...,
-)

Return a JuMP model read from filename in the format format.

If the filename ends in .gz, it will be uncompressed using GZip. If the filename ends in .bz2, it will be uncompressed using BZip2.

Other kwargs are passed to the Model constructor of the chosen format.

source

reduced_cost

JuMP.reduced_costFunction
reduced_cost(x::GenericVariableRef{T})::T where {T}

Return the reduced cost associated with variable x.

One interpretation of the reduced cost is that it is the change in the objective from an infinitesimal relaxation of the variable bounds.

This method is equivalent to querying the shadow price of the active variable bound (if one exists and is active).

See also: shadow_price.

Example

julia> import HiGHS
+)

Return a JuMP model read from filename in the format format.

If the filename ends in .gz, it will be uncompressed using GZip. If the filename ends in .bz2, it will be uncompressed using BZip2.

Other kwargs are passed to the Model constructor of the chosen format.

source
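
A minimal round-trip sketch (not from the docstring); the filename my_model.mps is hypothetical, and the format is inferred from the .mps extension:

julia> model = Model();

julia> @variable(model, x >= 0);

julia> @objective(model, Min, 2 * x);

julia> write_to_file(model, "my_model.mps")

julia> model_2 = read_from_file("my_model.mps");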

reduced_cost

JuMP.reduced_costFunction
reduced_cost(x::GenericVariableRef{T})::T where {T}

Return the reduced cost associated with variable x.

One interpretation of the reduced cost is that it is the change in the objective from an infinitesimal relaxation of the variable bounds.

This method is equivalent to querying the shadow price of the active variable bound (if one exists and is active).

See also: shadow_price.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
 
@@ -1746,7 +1746,7 @@
 true
 
 julia> reduced_cost(x)
-2.0
source

relative_gap

JuMP.relative_gapFunction
relative_gap(model::GenericModel)

Return the final relative optimality gap after a call to optimize!(model).

The exact value depends upon the implementation of MOI.RelativeGap by the particular solver used for optimization.

This function is equivalent to querying the MOI.RelativeGap attribute.

Example

julia> import HiGHS
+2.0
source

relative_gap

JuMP.relative_gapFunction
relative_gap(model::GenericModel)

Return the final relative optimality gap after a call to optimize!(model).

The exact value depends upon the implementation of MOI.RelativeGap by the particular solver used for optimization.

This function is equivalent to querying the MOI.RelativeGap attribute.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
 
@@ -1759,7 +1759,7 @@
 julia> optimize!(model)
 
 julia> relative_gap(model)
-0.0
source

relax_integrality

JuMP.relax_integralityFunction
relax_integrality(model::GenericModel)

Modifies model to "relax" all binary and integrality constraints on variables. Specifically,

  • Binary constraints are deleted, and variable bounds are tightened if necessary to ensure the variable is constrained to the interval $[0, 1]$.
  • Integrality constraints are deleted without modifying variable bounds.
  • An error is thrown if semi-continuous or semi-integer constraints are present (support may be added for these in the future).
  • All other constraints are ignored (left in place). This includes discrete constraints like SOS and indicator constraints.

Returns a function that can be called without any arguments to restore the original model. The behavior of this function is undefined if additional changes are made to the affected variables in the meantime.

Example

julia> model = Model();
+0.0
source

relax_integrality

JuMP.relax_integralityFunction
relax_integrality(model::GenericModel)

Modifies model to "relax" all binary and integrality constraints on variables. Specifically,

  • Binary constraints are deleted, and variable bounds are tightened if necessary to ensure the variable is constrained to the interval $[0, 1]$.
  • Integrality constraints are deleted without modifying variable bounds.
  • An error is thrown if semi-continuous or semi-integer constraints are present (support may be added for these in the future).
  • All other constraints are ignored (left in place). This includes discrete constraints like SOS and indicator constraints.

Returns a function that can be called without any arguments to restore the original model. The behavior of this function is undefined if additional changes are made to the affected variables in the meantime.

Example

julia> model = Model();
 
 julia> @variable(model, x, Bin);
 
@@ -1785,7 +1785,7 @@
  y ≥ 1
  y ≤ 10
  y integer
- x binary
source

relax_with_penalty!

JuMP.relax_with_penalty!Function
relax_with_penalty!(
+ x binary
source

relax_with_penalty!

JuMP.relax_with_penalty!Function
relax_with_penalty!(
     model::GenericModel{T},
     [penalties::Dict{ConstraintRef,T}];
     [default::Union{Nothing,Real} = nothing,]
@@ -1828,7 +1828,7 @@
 Subject to
  c2 : 3 x + _[2] ≥ 0
  c1 : 2 x ≤ -1
- _[2] ≥ 0
source

remove_bridge

JuMP.remove_bridgeFunction
remove_bridge(
+ _[2] ≥ 0
source

remove_bridge

JuMP.remove_bridgeFunction
remove_bridge(
     model::GenericModel{S},
     BT::Type{<:MOI.Bridges.AbstractBridge};
     coefficient_type::Type{T} = S,
@@ -1848,17 +1848,17 @@
            model,
            MOI.Bridges.Constraint.NumberConversionBridge;
            coefficient_type = Complex{Float64},
-       )
source

reshape_set

JuMP.reshape_setFunction
reshape_set(vectorized_set::MOI.AbstractSet, shape::AbstractShape)

Return a set in its original shape shape given its vectorized form vectorized_form.

Example

Given a SymmetricMatrixShape of vectorized form [1, 2, 3] in MOI.PositiveSemidefiniteConeTriangle(2), the following code returns the set of the original constraint Symmetric([1 2; 2 3]) in PSDCone():

julia> reshape_set(MOI.PositiveSemidefiniteConeTriangle(2), SymmetricMatrixShape(2))
-PSDCone()
source

reshape_vector

JuMP.reshape_vectorFunction
reshape_vector(vectorized_form::Vector, shape::AbstractShape)

Return an object in its original shape shape given its vectorized form vectorized_form.

Example

Given a SymmetricMatrixShape of vectorized form [1, 2, 3], the following code returns the matrix Symmetric([1 2; 2 3]):

julia> reshape_vector([1, 2, 3], SymmetricMatrixShape(2))
+       )
source

reshape_set

JuMP.reshape_setFunction
reshape_set(vectorized_set::MOI.AbstractSet, shape::AbstractShape)

Return a set in its original shape shape given its vectorized form vectorized_form.

Example

Given a SymmetricMatrixShape of vectorized form [1, 2, 3] in MOI.PositiveSemidefiniteConeTriangle(2), the following code returns the set of the original constraint Symmetric([1 2; 2 3]) in PSDCone():

julia> reshape_set(MOI.PositiveSemidefiniteConeTriangle(2), SymmetricMatrixShape(2))
+PSDCone()
source

reshape_vector

JuMP.reshape_vectorFunction
reshape_vector(vectorized_form::Vector, shape::AbstractShape)

Return an object in its original shape shape given its vectorized form vectorized_form.

Example

Given a SymmetricMatrixShape of vectorized form [1, 2, 3], the following code returns the matrix Symmetric([1 2; 2 3]):

julia> reshape_vector([1, 2, 3], SymmetricMatrixShape(2))
 2×2 LinearAlgebra.Symmetric{Int64, Matrix{Int64}}:
  1  2
- 2  3
source

result_count

JuMP.result_countFunction
result_count(model::GenericModel)

Return the number of results available to query after a call to optimize!.

Example

julia> import Ipopt
+ 2  3
source

result_count

JuMP.result_countFunction
result_count(model::GenericModel)

Return the number of results available to query after a call to optimize!.

Example

julia> import Ipopt
 
 julia> model = Model(Ipopt.Optimizer);
 
 julia> result_count(model)
-0
source

reverse_sense

JuMP.reverse_senseFunction
reverse_sense(::Val{T}) where {T}

Given an (in)equality symbol T, return a new Val object with the opposite (in)equality symbol.

This function is intended for use in JuMP extensions.

Example

julia> reverse_sense(Val(:>=))
-Val{:<=}()
source

set_attribute

JuMP.set_attributeFunction
set_attribute(model::GenericModel, attr::MOI.AbstractModelAttribute, value)
+0
source

reverse_sense

JuMP.reverse_senseFunction
reverse_sense(::Val{T}) where {T}

Given an (in)equality symbol T, return a new Val object with the opposite (in)equality symbol.

This function is intended for use in JuMP extensions.

Example

julia> reverse_sense(Val(:>=))
+Val{:<=}()
source

set_attribute

JuMP.set_attributeFunction
set_attribute(model::GenericModel, attr::MOI.AbstractModelAttribute, value)
 set_attribute(x::GenericVariableRef, attr::MOI.AbstractVariableAttribute, value)
 set_attribute(cr::ConstraintRef, attr::MOI.AbstractConstraintAttribute, value)

Set the value of a solver-specific attribute attr to value.

This is equivalent to calling MOI.set with the associated MOI model and, for variables and constraints, with the associated MOI.VariableIndex or MOI.ConstraintIndex.

Example

julia> model = Model();
 
@@ -1872,7 +1872,7 @@
 
 julia> set_attribute(x, MOI.VariableName(), "x_new")
 
-julia> set_attribute(c, MOI.ConstraintName(), "c_new")
source
set_attribute(
+julia> set_attribute(c, MOI.ConstraintName(), "c_new")
source
set_attribute(
     model::Union{GenericModel,MOI.OptimizerWithAttributes},
     attr::Union{AbstractString,MOI.AbstractOptimizerAttribute},
     value,
@@ -1888,7 +1888,7 @@
 
 julia> set_attribute(opt, "output_flag", true)
 
-julia> set_attribute(opt, MOI.RawOptimizerAttribute("output_flag"), false)
source

set_attributes

JuMP.set_attributesFunction
set_attributes(
+julia> set_attribute(opt, MOI.RawOptimizerAttribute("output_flag"), false)
source

set_attributes

JuMP.set_attributesFunction
set_attributes(
     destination::Union{
         GenericModel,
         MOI.OptimizerWithAttributes,
@@ -1906,7 +1906,7 @@
 
 julia> set_attribute(model, "tol", 1e-4)
 
-julia> set_attribute(model, "max_iter", 100)
source

set_binary

JuMP.set_binaryFunction
set_binary(v::GenericVariableRef)

Add a constraint on the variable v that it must take values in the set $\{0,1\}$.

See also BinaryRef, is_binary, unset_binary.

Example

julia> model = Model();
+julia> set_attribute(model, "max_iter", 100)
source

set_binary

JuMP.set_binaryFunction
set_binary(v::GenericVariableRef)

Add a constraint on the variable v that it must take values in the set $\{0,1\}$.

See also BinaryRef, is_binary, unset_binary.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -1916,7 +1916,7 @@
 julia> set_binary(x)
 
 julia> is_binary(x)
-true
source

set_dual_start_value

JuMP.set_dual_start_valueFunction
set_dual_start_value(con_ref::ConstraintRef, value)

Set the dual start value (MOI attribute ConstraintDualStart) of the constraint con_ref to value.

To remove a dual start value set it to nothing.

See also dual_start_value.

Example

julia> model = Model();
+true
source

set_dual_start_value

JuMP.set_dual_start_valueFunction
set_dual_start_value(con_ref::ConstraintRef, value)

Set the dual start value (MOI attribute ConstraintDualStart) of the constraint con_ref to value.

To remove a dual start value set it to nothing.

See also dual_start_value.

Example

julia> model = Model();
 
 julia> @variable(model, x, start = 2.0);
 
@@ -1931,7 +1931,7 @@
 
 julia> set_dual_start_value(c, nothing)
 
-julia> dual_start_value(c)
source

set_integer

JuMP.set_integerFunction
set_integer(variable_ref::GenericVariableRef)

Add an integrality constraint on the variable variable_ref.

See also IntegerRef, is_integer, unset_integer.

Example

julia> model = Model();
+julia> dual_start_value(c)
source

set_integer

JuMP.set_integerFunction
set_integer(variable_ref::GenericVariableRef)

Add an integrality constraint on the variable variable_ref.

See also IntegerRef, is_integer, unset_integer.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -1941,7 +1941,7 @@
 julia> set_integer(x)
 
 julia> is_integer(x)
-true
source

set_lower_bound

JuMP.set_lower_boundFunction
set_lower_bound(v::GenericVariableRef, lower::Number)

Set the lower bound of a variable. If one does not exist, create a new lower bound constraint.

See also LowerBoundRef, has_lower_bound, lower_bound, delete_lower_bound.

Example

julia> model = Model();
+true
source

set_lower_bound

JuMP.set_lower_boundFunction
set_lower_bound(v::GenericVariableRef, lower::Number)

Set the lower bound of a variable. If one does not exist, create a new lower bound constraint.

See also LowerBoundRef, has_lower_bound, lower_bound, delete_lower_bound.

Example

julia> model = Model();
 
 julia> @variable(model, x >= 1.0);
 
@@ -1951,7 +1951,7 @@
 julia> set_lower_bound(x, 2.0)
 
 julia> lower_bound(x)
-2.0
source

set_name

JuMP.set_nameFunction
set_name(con_ref::ConstraintRef, s::AbstractString)

Set a constraint's name attribute.

Example

julia> model = Model();
+2.0
source

set_name

JuMP.set_nameFunction
set_name(con_ref::ConstraintRef, s::AbstractString)

Set a constraint's name attribute.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -1964,7 +1964,7 @@
 "my_constraint"
 
 julia> c
-my_constraint : [2 x] ∈ Nonnegatives()
source
set_name(v::GenericVariableRef, s::AbstractString)

Set a variable's name attribute.

Example

julia> model = Model();
+my_constraint : [2 x] ∈ Nonnegatives()
source
set_name(v::GenericVariableRef, s::AbstractString)

Set a variable's name attribute.

Example

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -1975,7 +1975,7 @@
 x_foo
 
 julia> name(x)
-"x_foo"
source

set_normalized_coefficient

JuMP.set_normalized_coefficientFunction
set_normalized_coefficient(
+"x_foo"
source

set_normalized_coefficient

JuMP.set_normalized_coefficientFunction
set_normalized_coefficient(
     constraint::ConstraintRef,
     variable::GenericVariableRef,
     value::Number,
@@ -1990,7 +1990,7 @@
 julia> set_normalized_coefficient(con, x, 4)
 
 julia> con
-con : 4 x ≤ 2
source
set_normalized_coefficient(
+con : 4 x ≤ 2
source
set_normalized_coefficient(
     constraints::AbstractVector{<:ConstraintRef},
     variables::AbstractVector{<:GenericVariableRef},
     values::AbstractVector{<:Number},
@@ -2008,7 +2008,7 @@
 julia> set_normalized_coefficient([con, con], [x, y], [6, 7])
 
 julia> con
-con : 6 x + 7 y ≤ 2
source
set_normalized_coefficient(
+con : 6 x + 7 y ≤ 2
source
set_normalized_coefficient(
     con_ref::ConstraintRef,
     variable::AbstractVariableRef,
     new_coefficients::Vector{Tuple{Int64,T}},
@@ -2023,7 +2023,7 @@
 julia> set_normalized_coefficient(con, x, [(1, 2.0), (2, 5.0)])
 
 julia> con
-con : [2 x, 5 x] ∈ MathOptInterface.Nonnegatives(2)
source
set_normalized_coefficient(
+con : [2 x, 5 x] ∈ MathOptInterface.Nonnegatives(2)
source
set_normalized_coefficient(
     constraint::ConstraintRef,
     variable_1:GenericVariableRef,
     variable_2:GenericVariableRef,
@@ -2040,7 +2040,7 @@
 julia> set_normalized_coefficient(con, x[1], x[2], 5)
 
 julia> con
-con : 4 x[1]² + 5 x[1]*x[2] + x[2] ≤ 2
source
set_normalized_coefficient(
+con : 4 x[1]² + 5 x[1]*x[2] + x[2] ≤ 2
source
set_normalized_coefficient(
     constraints::AbstractVector{<:ConstraintRef},
     variables_1:AbstractVector{<:GenericVariableRef},
     variables_2:AbstractVector{<:GenericVariableRef},
@@ -2055,7 +2055,7 @@
 julia> set_normalized_coefficient([con, con], [x[1], x[1]], [x[1], x[2]], [4, 5])
 
 julia> con
-con : 4 x[1]² + 5 x[1]*x[2] + x[2] ≤ 2
source

set_normalized_rhs

JuMP.set_normalized_rhsFunction
set_normalized_rhs(constraint::ConstraintRef, value::Number)

Set the right-hand side term of constraint to value.

Note that prior to this step, JuMP will aggregate all constant terms onto the right-hand side of the constraint. For example, given a constraint 2x + 1 <= 2, set_normalized_rhs(con, 4) will create the constraint 2x <= 4, not 2x + 1 <= 4.

Example

julia> model = Model();
+con : 4 x[1]² + 5 x[1]*x[2] + x[2] ≤ 2
source

set_normalized_rhs

JuMP.set_normalized_rhsFunction
set_normalized_rhs(constraint::ConstraintRef, value::Number)

Set the right-hand side term of constraint to value.

Note that prior to this step, JuMP will aggregate all constant terms onto the right-hand side of the constraint. For example, given a constraint 2x + 1 <= 2, set_normalized_rhs(con, 4) will create the constraint 2x <= 4, not 2x + 1 <= 4.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -2065,7 +2065,7 @@
 julia> set_normalized_rhs(con, 4)
 
 julia> con
-con : 2 x ≤ 4
source
set_normalized_rhs(
+con : 2 x ≤ 4
source
set_normalized_rhs(
     constraints::AbstractVector{<:ConstraintRef},
     values::AbstractVector{<:Number}
 )

Set the right-hand side terms of all constraints to values.

Note that prior to this step, JuMP will aggregate all constant terms onto the right-hand side of the constraint. For example, given a constraint 2x + 1 <= 2, set_normalized_rhs([con], [4]) will create the constraint 2x <= 4, not 2x + 1 <= 4.

Example

julia> model = Model();
@@ -2084,12 +2084,12 @@
 con1 : 2 x ≤ 4
 
 julia> con2
-con2 : 3 x ≤ 5
source

set_objective

JuMP.set_objectiveFunction
set_objective(model::AbstractModel, sense::MOI.OptimizationSense, func)

The functional equivalent of the @objective macro.

Sets the objective sense and objective function simultaneously, and is equivalent to calling set_objective_sense and set_objective_function separately.

Example

julia> model = Model();
+con2 : 3 x ≤ 5
source

set_objective

JuMP.set_objectiveFunction
set_objective(model::AbstractModel, sense::MOI.OptimizationSense, func)

The functional equivalent of the @objective macro.

Sets the objective sense and objective function simultaneously, and is equivalent to calling set_objective_sense and set_objective_function separately.

Example

julia> model = Model();
 
 julia> @variable(model, x)
 x
 
-julia> set_objective(model, MIN_SENSE, x)
source

set_objective_coefficient

JuMP.set_objective_coefficientFunction
set_objective_coefficient(
+julia> set_objective(model, MIN_SENSE, x)
source

set_objective_coefficient

JuMP.set_objective_coefficientFunction
set_objective_coefficient(
     model::GenericModel,
     variable::GenericVariableRef,
     coefficient::Real,
@@ -2103,7 +2103,7 @@
 julia> set_objective_coefficient(model, x, 3)
 
 julia> objective_function(model)
-3 x + 1
source
set_objective_coefficient(
+3 x + 1
source
set_objective_coefficient(
     model::GenericModel,
     variables::Vector{<:GenericVariableRef},
     coefficients::Vector{<:Real},
@@ -2119,7 +2119,7 @@
 julia> set_objective_coefficient(model, [x, y], [5, 4])
 
 julia> objective_function(model)
-5 x + 4 y + 1
source
set_objective_coefficient(
+5 x + 4 y + 1
source
set_objective_coefficient(
     model::GenericModel{T},
     variable_1::GenericVariableRef{T},
     variable_2::GenericVariableRef{T},
@@ -2136,7 +2136,7 @@
 julia> set_objective_coefficient(model, x[1], x[2], 3)
 
 julia> objective_function(model)
-2 x[1]² + 3 x[1]*x[2]
source
set_objective_coefficient(
+2 x[1]² + 3 x[1]*x[2]
source
set_objective_coefficient(
     model::GenericModel{T},
     variables_1::AbstractVector{<:GenericVariableRef{T}},
     variables_2::AbstractVector{<:GenericVariableRef{T}},
@@ -2151,7 +2151,7 @@
 julia> set_objective_coefficient(model, [x[1], x[1]], [x[1], x[2]], [2, 3])
 
 julia> objective_function(model)
-2 x[1]² + 3 x[1]*x[2]
source

set_objective_function

JuMP.set_objective_functionFunction
set_objective_function(model::GenericModel, func::MOI.AbstractFunction)
+2 x[1]² + 3 x[1]*x[2]
source

set_objective_function

JuMP.set_objective_functionFunction
set_objective_function(model::GenericModel, func::MOI.AbstractFunction)
 set_objective_function(model::GenericModel, func::AbstractJuMPScalar)
 set_objective_function(model::GenericModel, func::Real)
 set_objective_function(model::GenericModel, func::Vector{<:AbstractJuMPScalar})

Sets the objective function of the model to the given function.

See set_objective_sense to set the objective sense.

These are low-level functions; the recommended way to set the objective is with the @objective macro.

Example

julia> model = Model();
@@ -2166,7 +2166,7 @@
 julia> set_objective_function(model, 2 * x + 1)
 
 julia> objective_function(model)
-2 x + 1
source

set_objective_sense

JuMP.set_objective_senseFunction
set_objective_sense(model::GenericModel, sense::MOI.OptimizationSense)

Sets the objective sense of the model to the given sense.

See set_objective_function to set the objective function.

These are low-level functions; the recommended way to set the objective is with the @objective macro.

Example

julia> model = Model();
+2 x + 1
source

set_objective_sense

JuMP.set_objective_senseFunction
set_objective_sense(model::GenericModel, sense::MOI.OptimizationSense)

Sets the objective sense of the model to the given sense.

See set_objective_function to set the objective function.

These are low-level functions; the recommended way to set the objective is with the @objective macro.

Example

julia> model = Model();
 
 julia> objective_sense(model)
 FEASIBILITY_SENSE::OptimizationSense = 2
@@ -2174,7 +2174,7 @@
 julia> set_objective_sense(model, MOI.MAX_SENSE)
 
 julia> objective_sense(model)
-MAX_SENSE::OptimizationSense = 1
source

set_optimize_hook

JuMP.set_optimize_hookFunction
set_optimize_hook(model::GenericModel, f::Union{Function,Nothing})

Set the function f as the optimize hook for model.

f should have a signature f(model::GenericModel; kwargs...), where the kwargs are those passed to optimize!.

Notes

  • The optimize hook should generally modify the model, or some external state in some way, and then call optimize!(model; ignore_optimize_hook = true) to optimize the problem, bypassing the hook.
  • Use set_optimize_hook(model, nothing) to unset an optimize hook.

Example

julia> model = Model();
+MAX_SENSE::OptimizationSense = 1
source

set_optimize_hook

JuMP.set_optimize_hookFunction
set_optimize_hook(model::GenericModel, f::Union{Function,Nothing})

Set the function f as the optimize hook for model.

f should have a signature f(model::GenericModel; kwargs...), where the kwargs are those passed to optimize!.

Notes

  • The optimize hook should generally modify the model, or some external state in some way, and then call optimize!(model; ignore_optimize_hook = true) to optimize the problem, bypassing the hook.
  • Use set_optimize_hook(model, nothing) to unset an optimize hook.

Example

julia> model = Model();
 
 julia> function my_hook(model::Model; kwargs...)
            println(kwargs)
@@ -2191,7 +2191,7 @@
 Base.Pairs{Symbol, Bool, Tuple{Symbol}, @NamedTuple{test_arg::Bool}}(:test_arg => 1)
 Calling with `ignore_optimize_hook = true`
 ERROR: NoOptimizer()
-[...]
source

set_optimizer

JuMP.set_optimizerFunction
set_optimizer(
+[...]
source

set_optimizer

JuMP.set_optimizerFunction
set_optimizer(
     model::GenericModel,
     optimizer_factory;
     add_bridges::Bool = true,
@@ -2201,7 +2201,7 @@
 
 julia> set_optimizer(model, () -> HiGHS.Optimizer())
 
-julia> set_optimizer(model, HiGHS.Optimizer; add_bridges = false)
source

set_parameter_value

JuMP.set_parameter_valueFunction
set_parameter_value(x::GenericVariableRef, value)

Update the parameter constraint on the variable x to value.

Errors if x is not a parameter.

See also ParameterRef, is_parameter, parameter_value.

Example

julia> model = Model();
+julia> set_optimizer(model, HiGHS.Optimizer; add_bridges = false)
source

set_parameter_value

JuMP.set_parameter_valueFunction
set_parameter_value(x::GenericVariableRef, value)

Update the parameter constraint on the variable x to value.

Errors if x is not a parameter.

See also ParameterRef, is_parameter, parameter_value.

Example

julia> model = Model();
 
 julia> @variable(model, p in Parameter(2))
 p
@@ -2212,7 +2212,7 @@
 julia> set_parameter_value(p, 2.5)
 
 julia> parameter_value(p)
-2.5
source

set_silent

JuMP.set_silentFunction
set_silent(model::GenericModel)

Takes precedence over any other attribute controlling verbosity and requires the solver to produce no output.

See also: unset_silent.

Example

julia> import Ipopt
+2.5
source

set_silent

JuMP.set_silentFunction
set_silent(model::GenericModel)

Takes precedence over any other attribute controlling verbosity and requires the solver to produce no output.

See also: unset_silent.

Example

julia> import Ipopt
 
 julia> model = Model(Ipopt.Optimizer);
 
@@ -2224,7 +2224,7 @@
 julia> unset_silent(model)
 
 julia> get_attribute(model, MOI.Silent())
-false
source

set_start_value

JuMP.set_start_valueFunction
set_start_value(con_ref::ConstraintRef, value)

Set the primal start value (MOI.ConstraintPrimalStart) of the constraint con_ref to value.

To remove a primal start value set it to nothing.

See also start_value.

Example

julia> model = Model();
+false
source

set_start_value

JuMP.set_start_valueFunction
set_start_value(con_ref::ConstraintRef, value)

Set the primal start value (MOI.ConstraintPrimalStart) of the constraint con_ref to value.

To remove a primal start value set it to nothing.

See also start_value.

Example

julia> model = Model();
 
 julia> @variable(model, x, start = 2.0);
 
@@ -2239,7 +2239,7 @@
 
 julia> set_start_value(c, nothing)
 
-julia> start_value(c)
source
set_start_value(variable::GenericVariableRef, value::Union{Real,Nothing})

Set the start value (MOI.VariablePrimalStart) of the variable to value.

Pass nothing to unset the start value.

Note: VariablePrimalStarts are sometimes called "MIP-starts" or "warmstarts".

See also: has_start_value, start_value.

Example

julia> model = Model();
+julia> start_value(c)
source
set_start_value(variable::GenericVariableRef, value::Union{Real,Nothing})

Set the start value (MOI.VariablePrimalStart) of the variable to value.

Pass nothing to unset the start value.

Note: VariablePrimalStarts are sometimes called "MIP-starts" or "warmstarts".

See also: has_start_value, start_value.

Example

julia> model = Model();
 
 julia> @variable(model, x, start = 1.5);
 
@@ -2265,13 +2265,13 @@
 true
 
 julia> start_value(y)
-2.0
source

set_start_values

JuMP.set_start_valuesFunction
set_start_values(
+2.0
source

set_start_values

JuMP.set_start_valuesFunction
set_start_values(
     model::GenericModel;
     variable_primal_start::Union{Nothing,Function} = value,
     constraint_primal_start::Union{Nothing,Function} = value,
     constraint_dual_start::Union{Nothing,Function} = dual,
     nonlinear_dual_start::Union{Nothing,Function} = nonlinear_dual_start_value,
-)

Set the primal and dual starting values in model using the functions provided.

If any keyword argument is nothing, the corresponding start value is skipped.

If the optimizer does not support setting the starting value, the value will be skipped.

variable_primal_start

This function controls the primal starting solution for the variables. It is equivalent to calling set_start_value for each variable, or setting the MOI.VariablePrimalStart attribute.

If it is a function, it must have the form variable_primal_start(x::VariableRef) that maps each variable x to the starting primal value.

The default is value.

constraint_primal_start

This function controls the primal starting solution for the constraints. It is equivalent to calling set_start_value for each constraint, or setting the MOI.ConstraintPrimalStart attribute.

If it is a function, it must have the form constraint_primal_start(ci::ConstraintRef) that maps each constraint ci to the starting primal value.

The default is value.

constraint_dual_start

This function controls the dual starting solution for the constraints. It is equivalent to calling set_dual_start_value for each constraint, or setting the MOI.ConstraintDualStart attribute.

If it is a function, it must have the form constraint_dual_start(ci::ConstraintRef) that maps each constraint ci to the starting dual value.

The default is dual.

nonlinear_dual_start

This function controls the dual starting solution for the nonlinear constraints. It is equivalent to calling set_nonlinear_dual_start_value.

If it is a function, it must have the form nonlinear_dual_start(model::GenericModel) that returns a vector corresponding to the dual start of the constraints.

The default is nonlinear_dual_start_value.

source

set_string_names_on_creation

JuMP.set_string_names_on_creationFunction
set_string_names_on_creation(model::GenericModel, value::Bool)

Set the default argument of the set_string_name keyword in the @variable and @constraint macros to value.

The set_string_name keyword is used to determine whether to assign String names to all variables and constraints in model.

By default, value is true. However, for larger models calling set_string_names_on_creation(model, false) can improve performance at the cost of reducing the readability of printing and solver log messages.

Example

julia> import HiGHS
+)

Set the primal and dual starting values in model using the functions provided.

If any keyword argument is nothing, the corresponding start value is skipped.

If the optimizer does not support setting the starting value, the value will be skipped.

variable_primal_start

This function controls the primal starting solution for the variables. It is equivalent to calling set_start_value for each variable, or setting the MOI.VariablePrimalStart attribute.

If it is a function, it must have the form variable_primal_start(x::VariableRef) that maps each variable x to the starting primal value.

The default is value.

constraint_primal_start

This function controls the primal starting solution for the constraints. It is equivalent to calling set_start_value for each constraint, or setting the MOI.ConstraintPrimalStart attribute.

If it is a function, it must have the form constraint_primal_start(ci::ConstraintRef) that maps each constraint ci to the starting primal value.

The default is value.

constraint_dual_start

This function controls the dual starting solution for the constraints. It is equivalent to calling set_dual_start_value for each constraint, or setting the MOI.ConstraintDualStart attribute.

If it is a function, it must have the form constraint_dual_start(ci::ConstraintRef) that maps each constraint ci to the starting dual value.

The default is dual.

nonlinear_dual_start

This function controls the dual starting solution for the nonlinear constraints. It is equivalent to calling set_nonlinear_dual_start_value.

If it is a function, it must have the form nonlinear_dual_start(model::GenericModel) that returns a vector corresponding to the dual start of the constraints.

The default is nonlinear_dual_start_value.

source
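
As an illustrative sketch (not part of the original docstring), the following warm-starts a model from its own solution while skipping the dual starts; it assumes HiGHS is installed and that the trivial LP solves to x = 0:

julia> import HiGHS

julia> model = Model(HiGHS.Optimizer);

julia> set_silent(model)

julia> @variable(model, x >= 0);

julia> @objective(model, Min, 2 * x);

julia> optimize!(model)

julia> set_start_values(model; constraint_dual_start = nothing, nonlinear_dual_start = nothing)  # copy the primal solution into the starting values

julia> start_value(x)
0.0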

set_string_names_on_creation

JuMP.set_string_names_on_creationFunction
set_string_names_on_creation(model::GenericModel, value::Bool)

Set the default argument of the set_string_name keyword in the @variable and @constraint macros to value.

The set_string_name keyword is used to determine whether to assign String names to all variables and constraints in model.

By default, value is true. However, for larger models calling set_string_names_on_creation(model, false) can improve performance at the cost of reducing the readability of printing and solver log messages.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
 
@@ -2281,7 +2281,7 @@
 julia> set_string_names_on_creation(model, false)
 
 julia> set_string_names_on_creation(model)
-false
source

set_time_limit_sec

JuMP.set_time_limit_secFunction
set_time_limit_sec(model::GenericModel, limit::Float64)

Set the time limit (in seconds) of the solver.

Can be unset using unset_time_limit_sec or with limit set to nothing.

See also: unset_time_limit_sec, time_limit_sec.

Example

julia> import Ipopt
+false
source

set_time_limit_sec

JuMP.set_time_limit_secFunction
set_time_limit_sec(model::GenericModel, limit::Float64)

Set the time limit (in seconds) of the solver.

Can be unset using unset_time_limit_sec or with limit set to nothing.

See also: unset_time_limit_sec, time_limit_sec.

Example

julia> import Ipopt
 
 julia> model = Model(Ipopt.Optimizer);
 
@@ -2294,7 +2294,7 @@
 
 julia> unset_time_limit_sec(model)
 
-julia> time_limit_sec(model)
source

set_upper_bound

JuMP.set_upper_boundFunction
set_upper_bound(v::GenericVariableRef, upper::Number)

Set the upper bound of a variable. If one does not exist, create an upper bound constraint.

See also UpperBoundRef, has_upper_bound, upper_bound, delete_upper_bound.

Example

julia> model = Model();
+julia> time_limit_sec(model)
source

set_upper_bound

JuMP.set_upper_boundFunction
set_upper_bound(v::GenericVariableRef, upper::Number)

Set the upper bound of a variable. If one does not exist, create an upper bound constraint.

See also UpperBoundRef, has_upper_bound, upper_bound, delete_upper_bound.

Example

julia> model = Model();
 
 julia> @variable(model, x <= 1.0);
 
@@ -2304,7 +2304,7 @@
 julia> set_upper_bound(x, 2.0)
 
 julia> upper_bound(x)
-2.0
source

shadow_price

JuMP.shadow_priceFunction
shadow_price(con_ref::ConstraintRef)

Return the change in the objective from an infinitesimal relaxation of the constraint.

The shadow price is computed from dual and can be queried only when has_duals is true and the objective sense is MIN_SENSE or MAX_SENSE (not FEASIBILITY_SENSE).

See also reduced_cost.

Comparison to dual

The shadow prices differ at most in sign from the dual value depending on the objective sense. The differences are summarized in the table:

            Min    Max
f(x) <= b   +1     -1
f(x) >= b   -1     +1

Notes

  • The function simply translates signs from dual and does not validate the conditions needed to guarantee the sensitivity interpretation of the shadow price. The caller is responsible, for example, for checking whether the solver converged to an optimal primal-dual pair or a proof of infeasibility.
  • The computation is based on the current objective sense of the model. If this has changed since the last solve, the results will be incorrect.
  • Relaxation of equality constraints (and hence the shadow price) is defined based on which sense of the equality constraint is active.

Example

julia> import HiGHS
+2.0
source

shadow_price

JuMP.shadow_priceFunction
shadow_price(con_ref::ConstraintRef)

Return the change in the objective from an infinitesimal relaxation of the constraint.

The shadow price is computed from dual and can be queried only when has_duals is true and the objective sense is MIN_SENSE or MAX_SENSE (not FEASIBILITY_SENSE).

See also reduced_cost.

Comparison to dual

The shadow prices differ at most in sign from the dual value depending on the objective sense. The differences are summarized in the table:

            Min    Max
f(x) <= b   +1     -1
f(x) >= b   -1     +1

Notes

  • The function simply translates signs from dual and does not validate the conditions needed to guarantee the sensitivity interpretation of the shadow price. The caller is responsible, for example, for checking whether the solver converged to an optimal primal-dual pair or a proof of infeasibility.
  • The computation is based on the current objective sense of the model. If this has changed since the last solve, the results will be incorrect.
  • Relaxation of equality constraints (and hence the shadow price) is defined based on which sense of the equality constraint is active.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
 
@@ -2323,7 +2323,7 @@
 true
 
 julia> shadow_price(c)
-2.0
source

shape

JuMP.shapeFunction
shape(c::AbstractConstraint)::AbstractShape

Return the shape of the constraint c.

Example

julia> model = Model();
+2.0
source

shape

JuMP.shapeFunction
shape(c::AbstractConstraint)::AbstractShape

Return the shape of the constraint c.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2]);
 
@@ -2335,20 +2335,20 @@
 julia> d = @constraint(model, x in SOS1());
 
 julia> shape(constraint_object(d))
-VectorShape()
source

show_backend_summary

JuMP.show_backend_summaryFunction
show_backend_summary(io::IO, model::GenericModel)

Print a summary of the optimizer backing the model.

Extensions

AbstractModels should implement this method.

Example

julia> model = Model();
+VectorShape()
source

show_backend_summary

JuMP.show_backend_summaryFunction
show_backend_summary(io::IO, model::GenericModel)

Print a summary of the optimizer backing the model.

Extensions

AbstractModels should implement this method.

Example

julia> model = Model();
 
 julia> show_backend_summary(stdout, model)
 Model mode: AUTOMATIC
 CachingOptimizer state: NO_OPTIMIZER
-Solver name: No optimizer attached.
source

show_constraints_summary

JuMP.show_constraints_summaryFunction
show_constraints_summary(io::IO, model::AbstractModel)

Write to io a summary of the number of constraints.

Extensions

AbstractModels should implement this method.

Example

julia> model = Model();
+Solver name: No optimizer attached.
source

show_constraints_summary

JuMP.show_constraints_summaryFunction
show_constraints_summary(io::IO, model::AbstractModel)

Write to io a summary of the number of constraints.

Extensions

AbstractModels should implement this method.

Example

julia> model = Model();
 
 julia> @variable(model, x >= 0);
 
 julia> show_constraints_summary(stdout, model)
-`VariableRef`-in-`MathOptInterface.GreaterThan{Float64}`: 1 constraint
source

show_objective_function_summary

JuMP.show_objective_function_summaryFunction
show_objective_function_summary(io::IO, model::AbstractModel)

Write to io a summary of the objective function type.

Extensions

AbstractModels should implement this method.

Example

julia> model = Model();
+`VariableRef`-in-`MathOptInterface.GreaterThan{Float64}`: 1 constraint
source

show_objective_function_summary

JuMP.show_objective_function_summaryFunction
show_objective_function_summary(io::IO, model::AbstractModel)

Write to io a summary of the objective function type.

Extensions

AbstractModels should implement this method.

Example

julia> model = Model();
 
 julia> show_objective_function_summary(stdout, model)
-Objective function type: AffExpr
source

simplex_iterations

JuMP.simplex_iterationsFunction
simplex_iterations(model::GenericModel)

If available, returns the cumulative number of simplex iterations during the most-recent optimization (the MOI.SimplexIterations attribute).

Throws a MOI.GetAttributeNotAllowed error if the attribute is not implemented by the solver.

Example

julia> import HiGHS
+Objective function type: AffExpr
source

simplex_iterations

JuMP.simplex_iterationsFunction
simplex_iterations(model::GenericModel)

If available, returns the cumulative number of simplex iterations during the most-recent optimization (the MOI.SimplexIterations attribute).

Throws a MOI.GetAttributeNotAllowed error if the attribute is not implemented by the solver.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
 
@@ -2357,7 +2357,7 @@
 julia> optimize!(model)
 
 julia> simplex_iterations(model)
-0
source

solution_summary

JuMP.solution_summaryFunction
solution_summary(model::GenericModel; result::Int = 1, verbose::Bool = false)

Return a struct that can be used to print a summary of the solution in result result.

If verbose=true, write out the primal solution for every variable and the dual solution for every constraint, excluding those with empty names.

Example

When called at the REPL, the summary is automatically printed:

julia> model = Model();
+0
source

solution_summary

JuMP.solution_summaryFunction
solution_summary(model::GenericModel; result::Int = 1, verbose::Bool = false)

Return a struct that can be used to print a summary of the solution in result result.

If verbose=true, write out the primal solution for every variable and the dual solution for every constraint, excluding those with empty names.

Example

When called at the REPL, the summary is automatically printed:

julia> model = Model();
 
 julia> solution_summary(model)
 * Solver : No optimizer attached.
@@ -2393,7 +2393,7 @@
   Primal status      : NO_SOLUTION
   Dual status        : NO_SOLUTION
 
-* Work counters
source

solve_time

JuMP.solve_timeFunction
solve_time(model::GenericModel)

If available, returns the solve time in wall-clock seconds reported by the solver (the MOI.SolveTimeSec attribute).

Throws a MOI.GetAttributeNotAllowed error if the attribute is not implemented by the solver.

Example

julia> import HiGHS
+* Work counters
source

solve_time

JuMP.solve_timeFunction
solve_time(model::GenericModel)

If available, returns the solve time in wall-clock seconds reported by the solver (the MOI.SolveTimeSec attribute).

Throws a MOI.GetAttributeNotAllowed error if the attribute is not implemented by the solver.

Example

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer);
 
@@ -2402,7 +2402,7 @@
 julia> optimize!(model)
 
 julia> solve_time(model)
-1.0488089174032211e-5
source

solver_name

JuMP.solver_nameFunction
solver_name(model::GenericModel)

If available, returns the MOI.SolverName property of the underlying optimizer.

Returns "No optimizer attached." in AUTOMATIC or MANUAL modes when no optimizer is attached.

Returns "SolverName() attribute not implemented by the optimizer." if the attribute is not implemented.

Example

julia> import Ipopt
+1.0488089174032211e-5
source

solver_name

JuMP.solver_nameFunction
solver_name(model::GenericModel)

If available, returns the MOI.SolverName property of the underlying optimizer.

Returns "No optimizer attached." in AUTOMATIC or MANUAL modes when no optimizer is attached.

Returns "SolverName() attribute not implemented by the optimizer." if the attribute is not implemented.

Example

julia> import Ipopt
 
 julia> model = Model(Ipopt.Optimizer);
 
@@ -2417,7 +2417,7 @@
 julia> model = Model(MOI.FileFormats.MPS.Model);
 
 julia> solver_name(model)
-"SolverName() attribute not implemented by the optimizer."
source

start_value

JuMP.start_valueFunction
start_value(con_ref::ConstraintRef)

Return the primal start value (MOI.ConstraintPrimalStart) of the constraint con_ref.

If no primal start value has been set, start_value will return nothing.

See also set_start_value.

Example

julia> model = Model();
+"SolverName() attribute not implemented by the optimizer."
source

start_value

JuMP.start_valueFunction
start_value(con_ref::ConstraintRef)

Return the primal start value (MOI.ConstraintPrimalStart) of the constraint con_ref.

If no primal start value has been set, start_value will return nothing.

See also set_start_value.

Example

julia> model = Model();
 
 julia> @variable(model, x, start = 2.0);
 
@@ -2432,7 +2432,7 @@
 
 julia> set_start_value(c, nothing)
 
-julia> start_value(c)
source
start_value(v::GenericVariableRef)

Return the start value (MOI.VariablePrimalStart) of the variable v.

Note: VariablePrimalStarts are sometimes called "MIP-starts" or "warmstarts".

See also: has_start_value, set_start_value.

Example

julia> model = Model();
+julia> start_value(c)
source
start_value(v::GenericVariableRef)

Return the start value (MOI.VariablePrimalStart) of the variable v.

Note: VariablePrimalStarts are sometimes called "MIP-starts" or "warmstarts".

See also: has_start_value, set_start_value.

Example

julia> model = Model();
 
 julia> @variable(model, x, start = 1.5);
 
@@ -2453,12 +2453,12 @@
 true
 
 julia> start_value(y)
-2.0
source

termination_status

JuMP.termination_statusFunction
termination_status(model::GenericModel)

Return a MOI.TerminationStatusCode describing why the solver stopped (that is, the MOI.TerminationStatus attribute).

Example

julia> import Ipopt
+2.0
source

termination_status

JuMP.termination_statusFunction
termination_status(model::GenericModel)

Return a MOI.TerminationStatusCode describing why the solver stopped (that is, the MOI.TerminationStatus attribute).

Example

julia> import Ipopt
 
 julia> model = Model(Ipopt.Optimizer);
 
 julia> termination_status(model)
-OPTIMIZE_NOT_CALLED::TerminationStatusCode = 0
source

time_limit_sec

JuMP.time_limit_secFunction
time_limit_sec(model::GenericModel)

Return the time limit (in seconds) of the model.

Returns nothing if unset.

See also: set_time_limit_sec, unset_time_limit_sec.

Example

julia> import Ipopt
+OPTIMIZE_NOT_CALLED::TerminationStatusCode = 0
source

time_limit_sec

JuMP.time_limit_secFunction
time_limit_sec(model::GenericModel)

Return the time limit (in seconds) of the model.

Returns nothing if unset.

See also: set_time_limit_sec, unset_time_limit_sec.

Example

julia> import Ipopt
 
 julia> model = Model(Ipopt.Optimizer);
 
@@ -2471,7 +2471,7 @@
 
 julia> unset_time_limit_sec(model)
 
-julia> time_limit_sec(model)
source

triangle_vec

JuMP.triangle_vecFunction
triangle_vec(matrix::Matrix)

Return the upper triangle of a matrix concatenated into a vector in the order required by JuMP and MathOptInterface for Triangle sets.

Example

julia> model = Model();
+julia> time_limit_sec(model)
source

triangle_vec

JuMP.triangle_vecFunction
triangle_vec(matrix::Matrix)

Return the upper triangle of a matrix concatenated into a vector in the order required by JuMP and MathOptInterface for Triangle sets.

Example

julia> model = Model();
 
 julia> @variable(model, X[1:3, 1:3], Symmetric);
 
@@ -2479,7 +2479,7 @@
 t
 
 julia> @constraint(model, [t; triangle_vec(X)] in MOI.RootDetConeTriangle(3))
-[t, X[1,1], X[1,2], X[2,2], X[1,3], X[2,3], X[3,3]] ∈ MathOptInterface.RootDetConeTriangle(3)
source

unfix

JuMP.unfixFunction
unfix(v::GenericVariableRef)

Delete the fixing constraint of a variable.

Error if one does not exist.

See also FixRef, is_fixed, fix_value, fix.

Example

julia> model = Model();
+[t, X[1,1], X[1,2], X[2,2], X[1,3], X[2,3], X[3,3]] ∈ MathOptInterface.RootDetConeTriangle(3)
source

unfix

JuMP.unfixFunction
unfix(v::GenericVariableRef)

Delete the fixing constraint of a variable.

Error if one does not exist.

See also FixRef, is_fixed, fix_value, fix.

Example

julia> model = Model();
 
 julia> @variable(model, x == 1);
 
@@ -2489,7 +2489,7 @@
 julia> unfix(x)
 
 julia> is_fixed(x)
-false
source

unregister

JuMP.unregisterFunction
unregister(model::GenericModel, key::Symbol)

Unregister the name key from model so that a new variable, constraint, or expression can be created with the same key.

Note that this will not delete the object model[key]; it will just remove the reference at model[key]. To delete the object, use delete as well.

See also: delete, object_dictionary.

Example

julia> model = Model();
+false
source

unregister

JuMP.unregisterFunction
unregister(model::GenericModel, key::Symbol)

Unregister the name key from model so that a new variable, constraint, or expression can be created with the same key.

Note that this will not delete the object model[key]; it will just remove the reference at model[key]. To delete the object, use delete as well.

See also: delete, object_dictionary.

Example

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -2516,7 +2516,7 @@
 x
 
 julia> num_variables(model)
-2
source

unsafe_backend

JuMP.unsafe_backendFunction
unsafe_backend(model::GenericModel)

Return the innermost optimizer associated with the JuMP model model.

This function should only be used by advanced users looking to access low-level solver-specific functionality. It has a high risk of incorrect usage. We strongly suggest you use the alternative suggested below.

See also: backend.

To obtain the index of a variable or constraint in the unsafe backend, use optimizer_index.

Unsafe behavior

This function is unsafe for two main reasons.

First, the formulation and order of variables and constraints in the unsafe backend may be different to the variables and constraints in model. This can happen because of bridges, or because the solver requires the variables or constraints in a specific order. In addition, the variable or constraint index returned by index at the JuMP level may be different to the index of the corresponding variable or constraint in the unsafe_backend. There is no solution to this. Use the alternative suggested below instead.

Second, the unsafe_backend may be empty, or lack some modifications made to the JuMP model. Thus, before calling unsafe_backend you should first call MOI.Utilities.attach_optimizer to ensure that the backend is synchronized with the JuMP model.

julia> import HiGHS
+2
source

unsafe_backend

JuMP.unsafe_backendFunction
unsafe_backend(model::GenericModel)

Return the innermost optimizer associated with the JuMP model model.

This function should only be used by advanced users looking to access low-level solver-specific functionality. It has a high risk of incorrect usage. We strongly suggest you use the alternative suggested below.

See also: backend.

To obtain the index of a variable or constraint in the unsafe backend, use optimizer_index.

Unsafe behavior

This function is unsafe for two main reasons.

First, the formulation and order of variables and constraints in the unsafe backend may be different to the variables and constraints in model. This can happen because of bridges, or because the solver requires the variables or constraints in a specific order. In addition, the variable or constraint index returned by index at the JuMP level may be different to the index of the corresponding variable or constraint in the unsafe_backend. There is no solution to this. Use the alternative suggested below instead.

Second, the unsafe_backend may be empty, or lack some modifications made to the JuMP model. Thus, before calling unsafe_backend you should first call MOI.Utilities.attach_optimizer to ensure that the backend is synchronized with the JuMP model.

julia> import HiGHS
 
 julia> model = Model(HiGHS.Optimizer)
 A JuMP Model
@@ -2557,7 +2557,7 @@
 A HiGHS model with 1 columns and 0 rows.
 
 julia> index(x)
-MOI.VariableIndex(1)
source

unset_binary

JuMP.unset_binaryFunction
unset_binary(variable_ref::GenericVariableRef)

Remove the binary constraint on the variable variable_ref.

See also BinaryRef, is_binary, set_binary.

Example

julia> model = Model();
+MOI.VariableIndex(1)
source

unset_binary

JuMP.unset_binaryFunction
unset_binary(variable_ref::GenericVariableRef)

Remove the binary constraint on the variable variable_ref.

See also BinaryRef, is_binary, set_binary.

Example

julia> model = Model();
 
 julia> @variable(model, x, Bin);
 
@@ -2567,7 +2567,7 @@
 julia> unset_binary(x)
 
 julia> is_binary(x)
-false
source

unset_integer

JuMP.unset_integerFunction
unset_integer(variable_ref::GenericVariableRef)

Remove the integrality constraint on the variable variable_ref.

Errors if one does not exist.

See also IntegerRef, is_integer, set_integer.

Example

julia> model = Model();
+false
source

unset_integer

JuMP.unset_integerFunction
unset_integer(variable_ref::GenericVariableRef)

Remove the integrality constraint on the variable variable_ref.

Errors if one does not exist.

See also IntegerRef, is_integer, set_integer.

Example

julia> model = Model();
 
 julia> @variable(model, x, Int);
 
@@ -2577,7 +2577,7 @@
 julia> unset_integer(x)
 
 julia> is_integer(x)
-false
source

unset_silent

JuMP.unset_silentFunction
unset_silent(model::GenericModel)

Neutralize the effect of the set_silent function and let the solver attributes control the verbosity.

See also: set_silent.

Example

julia> import Ipopt
+false
source

unset_silent

JuMP.unset_silentFunction
unset_silent(model::GenericModel)

Neutralize the effect of the set_silent function and let the solver attributes control the verbosity.

See also: set_silent.

Example

julia> import Ipopt
 
 julia> model = Model(Ipopt.Optimizer);
 
@@ -2589,7 +2589,7 @@
 julia> unset_silent(model)
 
 julia> get_attribute(model, MOI.Silent())
-false
source

unset_time_limit_sec

JuMP.unset_time_limit_secFunction
unset_time_limit_sec(model::GenericModel)

Unset the time limit of the solver.

See also: set_time_limit_sec, time_limit_sec.

Example

julia> import Ipopt
+false
source

unset_time_limit_sec

JuMP.unset_time_limit_secFunction
unset_time_limit_sec(model::GenericModel)

Unset the time limit of the solver.

See also: set_time_limit_sec, time_limit_sec.

Example

julia> import Ipopt
 
 julia> model = Model(Ipopt.Optimizer);
 
@@ -2602,19 +2602,19 @@
 
 julia> unset_time_limit_sec(model)
 
-julia> time_limit_sec(model)
source

upper_bound

JuMP.upper_boundFunction
upper_bound(v::GenericVariableRef)

Return the upper bound of a variable.

Error if one does not exist.

See also UpperBoundRef, has_upper_bound, set_upper_bound, delete_upper_bound.

Example

julia> model = Model();
+julia> time_limit_sec(model)
source

upper_bound

JuMP.upper_boundFunction
upper_bound(v::GenericVariableRef)

Return the upper bound of a variable.

Error if one does not exist.

See also UpperBoundRef, has_upper_bound, set_upper_bound, delete_upper_bound.

Example

julia> model = Model();
 
 julia> @variable(model, x <= 1.0);
 
 julia> upper_bound(x)
-1.0
source

value

JuMP.valueFunction
value(con_ref::ConstraintRef; result::Int = 1)

Return the primal value of constraint con_ref associated with result index result of the most-recent solution returned by the solver.

That is, if con_ref is the reference of a constraint func-in-set, it returns the value of func evaluated at the value of the variables (given by value(::GenericVariableRef)).

Use has_values to check if a result exists before asking for values.

See also: result_count.

Note

For scalar constraints, the constant is moved to the set so it is not taken into account in the primal value of the constraint. For instance, the constraint @constraint(model, 2x + 3y + 1 == 5) is transformed into 2x + 3y-in-MOI.EqualTo(4) so the value returned by this function is the evaluation of 2x + 3y.

source
value(var_value::Function, con_ref::ConstraintRef)

Evaluate the primal value of the constraint con_ref using var_value(v) as the value for each variable v.

source
value(v::GenericVariableRef; result = 1)

Return the value of variable v associated with result index result of the most-recent solution returned by the solver.

Use has_values to check if a result exists before asking for values.

See also: result_count.

source
value(var_value::Function, v::GenericVariableRef)

Evaluate the value of the variable v as var_value(v).

source
value(var_value::Function, ex::GenericAffExpr)

Evaluate ex using var_value(v) as the value for each variable v.

source
value(v::GenericAffExpr; result::Int = 1)

Return the value of the GenericAffExpr v associated with result index result of the most-recent solution returned by the solver.

See also: result_count.

source
value(var_value::Function, ex::GenericQuadExpr)

Evaluate ex using var_value(v) as the value for each variable v.

source
value(v::GenericQuadExpr; result::Int = 1)

Return the value of the GenericQuadExpr v associated with result index result of the most-recent solution returned by the solver.

Replaces getvalue for most use cases.

See also: result_count.

source
value(p::NonlinearParameter)

Return the current value stored in the nonlinear parameter p.

Example

julia> model = Model();
+1.0
source

value

JuMP.valueFunction
value(con_ref::ConstraintRef; result::Int = 1)

Return the primal value of constraint con_ref associated with result index result of the most-recent solution returned by the solver.

That is, if con_ref is the reference of a constraint func-in-set, it returns the value of func evaluated at the value of the variables (given by value(::GenericVariableRef)).

Use has_values to check if a result exists before asking for values.

See also: result_count.

Note

For scalar constraints, the constant is moved to the set so it is not taken into account in the primal value of the constraint. For instance, the constraint @constraint(model, 2x + 3y + 1 == 5) is transformed into 2x + 3y-in-MOI.EqualTo(4) so the value returned by this function is the evaluation of 2x + 3y.

source
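
As a small illustration of the note above (not from the original docstring): because the constant is moved into the set, evaluating the constraint with a substitution function returns 2x + 3y only, not 2x + 3y + 1:

julia> model = Model();

julia> @variable(model, x);

julia> @variable(model, y);

julia> @constraint(model, c, 2x + 3y + 1 == 5);

julia> value(v -> 2.0, c)  # evaluates the normalized function 2x + 3y at x = y = 2
10.0
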
value(var_value::Function, con_ref::ConstraintRef)

Evaluate the primal value of the constraint con_ref using var_value(v) as the value for each variable v.

source
value(v::GenericVariableRef; result = 1)

Return the value of variable v associated with result index result of the most-recent solution returned by the solver.

Use has_values to check if a result exists before asking for values.

See also: result_count.

source
value(var_value::Function, v::GenericVariableRef)

Evaluate the value of the variable v as var_value(v).

source
value(var_value::Function, ex::GenericAffExpr)

Evaluate ex using var_value(v) as the value for each variable v.

source
value(v::GenericAffExpr; result::Int = 1)

Return the value of the GenericAffExpr v associated with result index result of the most-recent solution returned by the solver.

See also: result_count.

source
value(var_value::Function, ex::GenericQuadExpr)

Evaluate ex using var_value(v) as the value for each variable v.

source
value(v::GenericQuadExpr; result::Int = 1)

Return the value of the GenericQuadExpr v associated with result index result of the most-recent solution returned by the solver.

Replaces getvalue for most use cases.

See also: result_count.

source
value(p::NonlinearParameter)

Return the current value stored in the nonlinear parameter p.

Example

julia> model = Model();
 
 julia> @NLparameter(model, p == 10)
 p == 10.0
 
 julia> value(p)
-10.0
source
value(ex::NonlinearExpression; result::Int = 1)

Return the value of the NonlinearExpression ex associated with result index result of the most-recent solution returned by the solver.

See also: result_count.

source
value(var_value::Function, ex::NonlinearExpression)

Evaluate ex using var_value(v) as the value for each variable v.

source
value(c::NonlinearConstraintRef; result::Int = 1)

Return the value of the NonlinearConstraintRef c associated with result index result of the most-recent solution returned by the solver.

See also: result_count.

source
value(var_value::Function, c::NonlinearConstraintRef)

Evaluate c using var_value(v) as the value for each variable v.

source

value_type

JuMP.value_typeFunction
value_type(::Type{<:Union{AbstractModel,AbstractVariableRef}})

Return the return type of value for variables of that model. It defaults to Float64 if it is not implemented.

Example

julia> value_type(GenericModel{BigFloat})
-BigFloat
source

variable_by_name

JuMP.variable_by_nameFunction
variable_by_name(
+10.0
source
value(ex::NonlinearExpression; result::Int = 1)

Return the value of the NonlinearExpression ex associated with result index result of the most-recent solution returned by the solver.

See also: result_count.

source
value(var_value::Function, ex::NonlinearExpression)

Evaluate ex using var_value(v) as the value for each variable v.

source
value(c::NonlinearConstraintRef; result::Int = 1)

Return the value of the NonlinearConstraintRef c associated with result index result of the most-recent solution returned by the solver.

See also: result_count.

source
value(var_value::Function, c::NonlinearConstraintRef)

Evaluate c using var_value(v) as the value for each variable v.

source
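
A minimal sketch (not from the original docstrings) of the var_value form, which evaluates an expression without solving the model:

julia> model = Model();

julia> @variable(model, x);

julia> @expression(model, ex, 2x + 1);

julia> value(v -> 3.0, ex)  # substitute 3.0 for every variable
7.0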

value_type

JuMP.value_typeFunction
value_type(::Type{<:Union{AbstractModel,AbstractVariableRef}})

Return the return type of value for variables of that model. It defaults to Float64 if it is not implemented.

Example

julia> value_type(GenericModel{BigFloat})
+BigFloat
source

variable_by_name

JuMP.variable_by_nameFunction
variable_by_name(
     model::AbstractModel,
     name::String,
 )::Union{AbstractVariableRef,Nothing}

Returns the reference of the variable with name attribute name or Nothing if no variable has this name attribute. Throws an error if several variables have name as their name attribute.

Example

julia> model = Model();
@@ -2657,12 +2657,12 @@
  u[2]
 
 julia> variable_by_name(model, "u[2]")
-u[2]
source

variable_ref_type

JuMP.variable_ref_typeFunction
variable_ref_type(::Union{F,Type{F}}) where {F}

A helper function used internally by JuMP and some JuMP extensions. Returns the variable type associated with the model or expression type F.

source

vectorize

JuMP.vectorizeFunction
vectorize(matrix::AbstractMatrix, ::Shape)

Convert the matrix into a vector according to Shape.

source

write_to_file

JuMP.write_to_fileFunction
write_to_file(
+u[2]
source

variable_ref_type

JuMP.variable_ref_typeFunction
variable_ref_type(::Union{F,Type{F}}) where {F}

A helper function used internally by JuMP and some JuMP extensions. Returns the variable type associated with the model or expression type F.

source

vectorize

JuMP.vectorizeFunction
vectorize(matrix::AbstractMatrix, ::Shape)

Convert the matrix into a vector according to Shape.

source

write_to_file

JuMP.write_to_fileFunction
write_to_file(
     model::GenericModel,
     filename::String;
     format::MOI.FileFormats.FileFormat = MOI.FileFormats.FORMAT_AUTOMATIC,
     kwargs...,
-)

Write the JuMP model model to filename in the format format.

If the filename ends in .gz, it will be compressed using GZip. If the filename ends in .bz2, it will be compressed using BZip2.

Other kwargs are passed to the Model constructor of the chosen format.

source

AbstractConstraint

JuMP.AbstractConstraintType
abstract type AbstractConstraint

An abstract base type for all constraint types. AbstractConstraints store the function and set directly, unlike ConstraintRefs that are merely references to constraints stored in a model. AbstractConstraints do not need to be attached to a model.

source

AbstractJuMPScalar

JuMP.AbstractJuMPScalarType
AbstractJuMPScalar <: MutableArithmetics.AbstractMutable

Abstract base type for all scalar types

The subtyping of AbstractMutable will allow calls of some Base functions to be redirected to a method in MA that handles type promotion more carefully (for example the promotion in sparse matrix products in SparseArrays usually does not work for JuMP types) and exploits the mutability of AffExpr and QuadExpr.

source

AbstractModel

JuMP.AbstractModelType
AbstractModel

An abstract type that should be subtyped for users creating JuMP extensions.

source

AbstractScalarSet

JuMP.AbstractScalarSetType
AbstractScalarSet

An abstract type for defining new scalar sets in JuMP.

Implement moi_set(::AbstractScalarSet) to convert the type into an MOI set.

See also: moi_set.

source

AbstractShape

JuMP.AbstractShapeType
AbstractShape

Abstract vectorizable shape. Given a flat vector form of an object of shape shape, the original object can be obtained by reshape_vector.

source

AbstractVariable

JuMP.AbstractVariableType
AbstractVariable

Variable returned by build_variable. It represents a variable that has not been added yet to any model. It can be added to a given model with add_variable.

source

AbstractVariableRef

JuMP.AbstractVariableRefType
AbstractVariableRef

Variable returned by add_variable. Affine (resp. quadratic) operations with variables of type V<:AbstractVariableRef and coefficients of type T create a GenericAffExpr{T,V} (resp. GenericQuadExpr{T,V}).

source

AbstractVectorSet

JuMP.AbstractVectorSetType
AbstractVectorSet

An abstract type for defining new sets in JuMP.

Implement moi_set(::AbstractVectorSet, dim::Int) to convert the type into an MOI set.

See also: moi_set.

source

AffExpr

JuMP.AffExprType
AffExpr

Alias for GenericAffExpr{Float64,VariableRef}, the specific GenericAffExpr used by JuMP.

source

ArrayShape

JuMP.ArrayShapeType
ArrayShape{N}(dims::NTuple{N,Int}) where {N}

An AbstractShape that represents array-valued constraints.

Example

julia> model = Model();
+)

Write the JuMP model model to filename in the format format.

If the filename ends in .gz, it will be compressed using GZip. If the filename ends in .bz2, it will be compressed using BZip2.

Other kwargs are passed to the Model constructor of the chosen format.

source
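
A minimal usage sketch (not part of the original docstring); the filename my_model.mps.gz is hypothetical, the format is inferred from the extension, and the .gz suffix triggers GZip compression:

julia> model = Model();

julia> @variable(model, x >= 0);

julia> @objective(model, Min, 2 * x);

julia> write_to_file(model, "my_model.mps.gz")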

AbstractConstraint

JuMP.AbstractConstraintType
abstract type AbstractConstraint

An abstract base type for all constraint types. AbstractConstraints store the function and set directly, unlike ConstraintRefs that are merely references to constraints stored in a model. AbstractConstraints do not need to be attached to a model.

source

AbstractJuMPScalar

JuMP.AbstractJuMPScalarType
AbstractJuMPScalar <: MutableArithmetics.AbstractMutable

Abstract base type for all scalar types

The subtyping of AbstractMutable will allow calls of some Base functions to be redirected to a method in MA that handles type promotion more carefully (for example the promotion in sparse matrix products in SparseArrays usually does not work for JuMP types) and exploits the mutability of AffExpr and QuadExpr.

source

AbstractModel

JuMP.AbstractModelType
AbstractModel

An abstract type that should be subtyped for users creating JuMP extensions.

source

AbstractScalarSet

JuMP.AbstractScalarSetType
AbstractScalarSet

An abstract type for defining new scalar sets in JuMP.

Implement moi_set(::AbstractScalarSet) to convert the type into an MOI set.

See also: moi_set.

source
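
A minimal extension sketch (not part of the original docstring); the set MyGreaterThanOne is hypothetical, and defining moi_set lets it be used directly in @constraint:

julia> struct MyGreaterThanOne <: JuMP.AbstractScalarSet end

julia> JuMP.moi_set(::MyGreaterThanOne) = MOI.GreaterThan(1.0);

julia> model = Model();

julia> @variable(model, x);

julia> @constraint(model, x in MyGreaterThanOne());  # lowered to x-in-MOI.GreaterThan(1.0)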

AbstractShape

JuMP.AbstractShapeType
AbstractShape

Abstract vectorizable shape. Given a flat vector form of an object of shape shape, the original object can be obtained by reshape_vector.

source

AbstractVariable

JuMP.AbstractVariableType
AbstractVariable

Variable returned by build_variable. It represents a variable that has not been added yet to any model. It can be added to a given model with add_variable.

source

AbstractVariableRef

JuMP.AbstractVariableRefType
AbstractVariableRef

Variable returned by add_variable. Affine (resp. quadratic) operations with variables of type V<:AbstractVariableRef and coefficients of type T create a GenericAffExpr{T,V} (resp. GenericQuadExpr{T,V}).

source

AbstractVectorSet

JuMP.AbstractVectorSetType
AbstractVectorSet

An abstract type for defining new sets in JuMP.

Implement moi_set(::AbstractVectorSet, dim::Int) to convert the type into an MOI set.

See also: moi_set.

source

AffExpr

JuMP.AffExprType
AffExpr

Alias for GenericAffExpr{Float64,VariableRef}, the specific GenericAffExpr used by JuMP.

source

ArrayShape

JuMP.ArrayShapeType
ArrayShape{N}(dims::NTuple{N,Int}) where {N}

An AbstractShape that represents array-valued constraints.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2, 1:3]);
 
@@ -2671,12 +2671,12 @@
  x[2,1]  x[2,2]  x[2,3]] ∈ Nonnegatives()
 
 julia> shape(constraint_object(c))
-ArrayShape{2}((2, 3))
source

BinaryRef

JuMP.BinaryRefFunction
BinaryRef(v::GenericVariableRef)

Return a constraint reference to the constraint constraining v to be binary. Errors if one does not exist.

See also is_binary, set_binary, unset_binary.

Example

julia> model = Model();
+ArrayShape{2}((2, 3))
source

BinaryRef

JuMP.BinaryRefFunction
BinaryRef(v::GenericVariableRef)

Return a constraint reference to the constraint constraining v to be binary. Errors if one does not exist.

See also is_binary, set_binary, unset_binary.

Example

julia> model = Model();
 
 julia> @variable(model, x, Bin);
 
 julia> BinaryRef(x)
-x binary
source

BridgeableConstraint

JuMP.BridgeableConstraintType
BridgeableConstraint(
+x binary
source

BridgeableConstraint

JuMP.BridgeableConstraintType
BridgeableConstraint(
     constraint::C,
     bridge_type::B;
     coefficient_type::Type{T} = Float64,
@@ -2691,7 +2691,7 @@
 )
     constraint = ScalarConstraint(func, set)
     return BridgeableConstraint(constraint, CustomBridge)
-end

Note

JuMP extensions should extend JuMP.build_constraint only if they also define CustomSet, for three reasons:

  1. It is problematic if multiple extensions overload the same JuMP method.
  2. A missing method will not inform the users that they forgot to load the extension module defining the build_constraint method.
  3. Defining a method where neither the function nor any of the argument types are defined in the package is called type piracy and is discouraged in the Julia style guide.
source

ComplexPlane

JuMP.ComplexPlaneType
ComplexPlane

Complex plane object that can be used to create a complex variable in the @variable macro.

Example

Consider the following example:

julia> model = Model();
+end

Note

JuMP extensions should extend JuMP.build_constraint only if they also define CustomSet, for three reasons:

  1. It is problematic if multiple extensions overload the same JuMP method.
  2. A missing method will not inform the users that they forgot to load the extension module defining the build_constraint method.
  3. Defining a method where neither the function nor any of the argument types are defined in the package is called type piracy and is discouraged in the Julia style guide.
source

ComplexPlane

JuMP.ComplexPlaneType
ComplexPlane

Complex plane object that can be used to create a complex variable in the @variable macro.

Example

Consider the following example:

julia> model = Model();
 
 julia> @variable(model, x in ComplexPlane())
 real(x) + imag(x) im
@@ -2699,7 +2699,7 @@
 julia> all_variables(model)
 2-element Vector{VariableRef}:
  real(x)
- imag(x)

We see in the output of the last command that two real variables were created. The Julia variable x binds to an affine expression in terms of these two variables that parametrize the complex plane.

source

ComplexVariable

JuMP.ComplexVariableType
ComplexVariable{S,T,U,V} <: AbstractVariable

A struct used when adding complex variables.

See also: ComplexPlane.

source

ConstraintNotOwned

JuMP.ConstraintNotOwnedType
struct ConstraintNotOwned{C<:ConstraintRef} <: Exception
+ imag(x)

We see in the output of the last command that two real variables were created. The Julia variable x binds to an affine expression in terms of these two variables that parametrize the complex plane.

source

ComplexVariable

JuMP.ComplexVariableType
ComplexVariable{S,T,U,V} <: AbstractVariable

A struct used when adding complex variables.

See also: ComplexPlane.

source

ConstraintNotOwned

JuMP.ConstraintNotOwnedType
struct ConstraintNotOwned{C<:ConstraintRef} <: Exception
     constraint_ref::C
 end

An error thrown when the constraint constraint_ref was used in a model different to owner_model(constraint_ref).

Example

julia> model = Model();
 
@@ -2713,12 +2713,12 @@
 julia> MOI.get(model_new, MOI.ConstraintName(), c)
 ERROR: ConstraintNotOwned{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.GreaterThan{Float64}}, ScalarShape}}(c : x ≥ 0)
 Stacktrace:
-[...]
source

ConstraintRef

JuMP.ConstraintRefType
ConstraintRef

Holds a reference to the model and the corresponding MOI.ConstraintIndex.

source

FixRef

JuMP.FixRefFunction
FixRef(v::GenericVariableRef)

Return a constraint reference to the constraint fixing the value of v.

Errors if one does not exist.

See also is_fixed, fix_value, fix, unfix.

Example

julia> model = Model();
+[...]
source

ConstraintRef

JuMP.ConstraintRefType
ConstraintRef

Holds a reference to the model and the corresponding MOI.ConstraintIndex.

source
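
A small sketch (not part of the original docstring) showing that a ConstraintRef ties a constraint to its model and to an MOI.ConstraintIndex:

julia> model = Model();

julia> @variable(model, x);

julia> c = @constraint(model, 2 * x <= 1);

julia> owner_model(c) === model
true

julia> index(c) isa MOI.ConstraintIndex
true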

FixRef

JuMP.FixRefFunction
FixRef(v::GenericVariableRef)

Return a constraint reference to the constraint fixing the value of v.

Errors if one does not exist.

See also is_fixed, fix_value, fix, unfix.

Example

julia> model = Model();
 
 julia> @variable(model, x == 1);
 
 julia> FixRef(x)
-x = 1
source

GenericAffExpr

JuMP.GenericAffExprType
mutable struct GenericAffExpr{CoefType,VarType} <: AbstractJuMPScalar
+x = 1
source

GenericAffExpr

JuMP.GenericAffExprType
mutable struct GenericAffExpr{CoefType,VarType} <: AbstractJuMPScalar
     constant::CoefType
     terms::OrderedDict{VarType,CoefType}
 end

An expression type representing an affine expression of the form: $\sum a_i x_i + c$.

Fields

  • .constant: the constant c in the expression.
  • .terms: an OrderedDict, with keys of VarType and values of CoefType describing the sparse vector a.

Example

julia> model = Model();
@@ -2734,13 +2734,13 @@
 julia> expr.terms
 OrderedCollections.OrderedDict{VariableRef, Float64} with 2 entries:
   x[2] => 1.0
-  x[1] => 3.0
source

GenericModel

JuMP.GenericModelType
GenericModel{T}(
+  x[1] => 3.0
source

GenericModel

JuMP.GenericModelType
GenericModel{T}(
     [optimizer_factory;]
     add_bridges::Bool = true,
 ) where {T<:Real}

Create a new instance of a JuMP model.

If optimizer_factory is provided, the model is initialized with the optimizer returned by MOI.instantiate(optimizer_factory).

If optimizer_factory is not provided, use set_optimizer to set the optimizer before calling optimize!.

If add_bridges, JuMP adds a MOI.Bridges.LazyBridgeOptimizer to automatically reformulate the problem into a form supported by the optimizer.

Value type T

Passing a type other than Float64 as the value type T is an advanced operation. The value type must match that expected by the chosen optimizer. Consult the optimizer's documentation for details.

If not documented, assume that the optimizer supports only Float64.

Choosing an unsupported value type will throw an MOI.UnsupportedConstraint or an MOI.UnsupportedAttribute error, the timing of which (during the model construction or during a call to optimize!) depends on how the solver is interfaced to JuMP.

Example

julia> model = GenericModel{BigFloat}();
 
 julia> typeof(model)
-GenericModel{BigFloat}
source

GenericNonlinearExpr

JuMP.GenericNonlinearExprType
GenericNonlinearExpr{V}(head::Symbol, args::Vector{Any})
+GenericModel{BigFloat}
source

GenericNonlinearExpr

JuMP.GenericNonlinearExprType
GenericNonlinearExpr{V}(head::Symbol, args::Vector{Any})
 GenericNonlinearExpr{V}(head::Symbol, args::Any...)

The scalar-valued nonlinear function head(args...), represented as a symbolic expression tree, with the call operator head and ordered arguments in args.

V is the type of AbstractVariableRef present in the expression, and is used to help dispatch JuMP extensions.

head

The head::Symbol must be an operator supported by the model.

The default list of supported univariate operators is given by:

and the default list of supported multivariate operators is given by:

Additional operators can be added using @operator.

See the full list of operators supported by a MOI.ModelLike by querying the MOI.ListOfSupportedNonlinearOperators attribute.

args

The vector args contains the arguments to the nonlinear function. If the operator is univariate, it must contain one element. Otherwise, it may contain multiple elements.

Given a subtype of AbstractVariableRef, V, for GenericNonlinearExpr{V}, each element must be one of the following:

where T<:Real and T == value_type(V).

Unsupported operators

If the optimizer does not support head, an MOI.UnsupportedNonlinearOperator error will be thrown.

There is no guarantee about when this error will be thrown; it may be thrown when the function is first added to the model, or it may be thrown when optimize! is called.

Example

To represent the function $f(x) = sin(x)^2$, do:

julia> model = Model();
 
 julia> @variable(model, x)
@@ -2754,7 +2754,7 @@
            GenericNonlinearExpr{VariableRef}(:sin, x),
            2.0,
        )
-sin(x) ^ 2.0
source

GenericQuadExpr

JuMP.GenericQuadExprType
mutable struct GenericQuadExpr{CoefType,VarType} <: AbstractJuMPScalar
+sin(x) ^ 2.0
source

GenericQuadExpr

JuMP.GenericQuadExprType
mutable struct GenericQuadExpr{CoefType,VarType} <: AbstractJuMPScalar
     aff::GenericAffExpr{CoefType,VarType}
     terms::OrderedDict{UnorderedPair{VarType}, CoefType}
 end

An expression type representing a quadratic expression of the form: $\sum q_{i,j} x_i x_j + \sum a_i x_i + c$.

Fields

  • .aff: a GenericAffExpr representing the affine portion of the expression.
  • .terms: an OrderedDict, with keys of UnorderedPair{VarType} and values of CoefType, describing the sparse list of terms q.

Example

julia> model = Model();
@@ -2770,16 +2770,16 @@
 julia> expr.terms
 OrderedCollections.OrderedDict{UnorderedPair{VariableRef}, Float64} with 2 entries:
   UnorderedPair{VariableRef}(x[1], x[1]) => 2.0
-  UnorderedPair{VariableRef}(x[1], x[2]) => 1.0
source

GenericReferenceMap

JuMP.GenericReferenceMapType
GenericReferenceMap{T}

Mapping between the variable and constraint references of a model and its copy. The reference in the copied model can be obtained by indexing the map with the corresponding reference in the original model.

source

GenericVariableRef

JuMP.GenericVariableRefType
GenericVariableRef{T} <: AbstractVariableRef

Holds a reference to the model and the corresponding MOI.VariableIndex.

source

GreaterThanZero

JuMP.GreaterThanZeroType
GreaterThanZero()

A struct used to intercept when >= or ≥ is used in a macro via operator_to_set.

This struct is not the same as Nonnegatives so that we can disambiguate x >= y and x - y in Nonnegatives().

This struct is not intended for general usage, but it may be useful to some JuMP extensions.

Example

julia> operator_to_set(error, Val(:>=))
-GreaterThanZero()
source

HermitianMatrixAdjointShape

JuMP.HermitianMatrixAdjointShapeType
HermitianMatrixAdjointShape(side_dimension)

The dual_shape of HermitianMatrixShape.

This shape is not intended for regular use.

source

HermitianMatrixShape

JuMP.HermitianMatrixShapeType
HermitianMatrixShape(
     side_dimension::Int;
     needs_adjoint_dual::Bool = false,
-)

The shape object for a Hermitian square matrix of side_dimension rows and columns.

The vectorized form corresponds to MOI.HermitianPositiveSemidefiniteConeTriangle.

needs_adjoint_dual

By default, the dual_shape of HermitianMatrixShape is also HermitianMatrixShape. This is true for cases such as a LinearAlgebra.Hermitian matrix in HermitianPSDCone.

However, JuMP also supports LinearAlgebra.Hermitian matrix in Zeros, which is interpreted as an element-wise equality constraint. By exploiting symmetry, we pass only the upper triangle of the equality constraints. This works for the primal, but it leads to a factor of 2 difference in the off-diagonal dual elements. (The dual value of the (i, j) element in the triangle formulation should be divided by 2 when spread across the (i, j) and (j, i) elements in the square matrix formulation.) If the constraint has this dual inconsistency, set needs_adjoint_dual = true.

source

HermitianMatrixSpace

JuMP.HermitianMatrixSpaceType
HermitianMatrixSpace()

Use in the @variable macro to constrain a matrix of variables to be Hermitian.

Example

julia> model = Model();
 
 julia> @variable(model, Q[1:2, 1:2] in HermitianMatrixSpace())
 2×2 LinearAlgebra.Hermitian{GenericAffExpr{ComplexF64, VariableRef}, Matrix{GenericAffExpr{ComplexF64, VariableRef}}}:
  real(Q[1,1])                    real(Q[1,2]) + imag(Q[1,2]) im
- real(Q[1,2]) - imag(Q[1,2]) im  real(Q[2,2])
source

HermitianPSDCone

JuMP.HermitianPSDConeType
HermitianPSDCone

Hermitian positive semidefinite cone object that can be used to create a Hermitian positive semidefinite square matrix in the @variable and @constraint macros.

Example

Consider the following example:

julia> model = Model();
 
 julia> @variable(model, H[1:3, 1:3] in HermitianPSDCone())
 3×3 LinearAlgebra.Hermitian{GenericAffExpr{ComplexF64, VariableRef}, Matrix{GenericAffExpr{ComplexF64, VariableRef}}}:
@@ -2801,18 +2801,18 @@
 
 julia> all_constraints(model, Vector{VariableRef}, MOI.HermitianPositiveSemidefiniteConeTriangle)
 1-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.VectorOfVariables, MathOptInterface.HermitianPositiveSemidefiniteConeTriangle}}}:
- [real(H[1,1]), real(H[1,2]), real(H[2,2]), real(H[1,3]), real(H[2,3]), real(H[3,3]), imag(H[1,2]), imag(H[1,3]), imag(H[2,3])] ∈ MathOptInterface.HermitianPositiveSemidefiniteConeTriangle(3)

We see in the output of the last commands that 9 real variables were created. The matrix H contains affine expressions in terms of these 9 variables that parametrize a Hermitian matrix.

source

IntegerRef

JuMP.IntegerRefFunction
IntegerRef(v::GenericVariableRef)

Return a constraint reference to the constraint constraining v to be integer.

Errors if one does not exist.

See also is_integer, set_integer, unset_integer.

Example

julia> model = Model();
 
 julia> @variable(model, x, Int);
 
 julia> IntegerRef(x)
-x integer
source

LPMatrixData

JuMP.LPMatrixDataType
LPMatrixData{T}

The struct returned by lp_matrix_data. See lp_matrix_data for a description of the public fields.

source
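
A minimal sketch (not part of the upstream docstring) of obtaining an LPMatrixData from a small model; it assumes the A field, which holds the constraint matrix:

julia> model = Model();

julia> @variable(model, x >= 0);

julia> @constraint(model, 2x <= 1);

julia> data = lp_matrix_data(model);

julia> size(data.A)
(1, 1)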

LessThanZero

JuMP.LessThanZeroType
LessThanZero()

A struct used to intercept when <= or ≤ is used in a macro via operator_to_set.

This struct is not the same as Nonpositives so that we can disambiguate x <= y and x - y in Nonpositives().

This struct is not intended for general usage, but it may be useful to some JuMP extensions.

Example

julia> operator_to_set(error, Val(:<=))
-LessThanZero()
source

LinearTermIterator

JuMP.LinearTermIteratorType
LinearTermIterator{GAE<:GenericAffExpr}

A struct that implements the iterate protocol in order to iterate over tuples of (coefficient, variable) in the GenericAffExpr.

source
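
For illustration (not from the upstream docstring), linear_terms returns this iterator; a minimal sketch:

julia> model = Model();

julia> @variable(model, x[1:2]);

julia> expr = 2.0 * x[1] + 3.0 * x[2] + 1.0;

julia> for (coefficient, variable) in linear_terms(expr)
           println(coefficient, " * ", variable)
       end
2.0 * x[1]
3.0 * x[2]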

LowerBoundRef

JuMP.LowerBoundRefFunction
LowerBoundRef(v::GenericVariableRef)

Return a constraint reference to the lower bound constraint of v.

Errors if one does not exist.

See also has_lower_bound, lower_bound, set_lower_bound, delete_lower_bound.

Example

julia> model = Model();
 
 julia> @variable(model, x >= 1.0);
 
 julia> LowerBoundRef(x)
-x ≥ 1
source

Model

JuMP.ModelType
Model([optimizer_factory;] add_bridges::Bool = true)

Create a new instance of a JuMP model.

If optimizer_factory is provided, the model is initialized with the optimizer returned by MOI.instantiate(optimizer_factory).

If optimizer_factory is not provided, use set_optimizer to set the optimizer before calling optimize!.

If add_bridges, JuMP adds a MOI.Bridges.LazyBridgeOptimizer to automatically reformulate the problem into a form supported by the optimizer.

Example

julia> import Ipopt
 
 julia> model = Model(Ipopt.Optimizer);
 
@@ -2823,16 +2823,16 @@
 
 julia> import MultiObjectiveAlgorithms as MOA
 
-julia> model = Model(() -> MOA.Optimizer(HiGHS.Optimizer); add_bridges = false);
source

ModelMode

JuMP.ModelModeType
ModelMode

An enum to describe the state of the CachingOptimizer inside a JuMP model.

See also: mode.

Values

Possible values are:

  • AUTOMATIC: moi_backend field holds a CachingOptimizer in AUTOMATIC mode.
  • MANUAL: moi_backend field holds a CachingOptimizer in MANUAL mode.
  • DIRECT: moi_backend field holds an AbstractOptimizer. No extra copy of the model is stored. The moi_backend must support add_constraint etc.
source
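
A minimal sketch (not from the upstream docstring): the mode of a default model is AUTOMATIC.

julia> model = Model();

julia> mode(model)
AUTOMATIC::ModelMode = 0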

NLPEvaluator

JuMP.NLPEvaluatorFunction
NLPEvaluator(
     model::Model,
     _differentiation_backend::MOI.Nonlinear.AbstractAutomaticDifferentiation =
         MOI.Nonlinear.SparseReverseMode(),
-)

Return an MOI.AbstractNLPEvaluator constructed from model.

Warning

Before using, you must initialize the evaluator using MOI.initialize.

Experimental

These features may change or be removed in any future version of JuMP.

Pass _differentiation_backend to specify the differentiation backend used to compute derivatives.

source

NoOptimizer

JuMP.NoOptimizerType
struct NoOptimizer <: Exception end

An error thrown when no optimizer is set and one is required.

The optimizer can be provided to the Model constructor or by calling set_optimizer.

Example

julia> model = Model();
 
 julia> optimize!(model)
 ERROR: NoOptimizer()
 Stacktrace:
-[...]
source

NonlinearExpr

JuMP.NonlinearExprType
NonlinearExpr

Alias for GenericNonlinearExpr{VariableRef}, the specific GenericNonlinearExpr used by JuMP.

source

NonlinearOperator

JuMP.NonlinearOperatorType
NonlinearOperator(func::Function, head::Symbol)

A callable struct (functor) representing a function named head.

When called with AbstractJuMPScalars, the struct returns a GenericNonlinearExpr.

When called with non-JuMP types, the struct returns the evaluation of func(args...).

Unless head is special-cased by the optimizer, the operator must have already been added to the model using add_nonlinear_operator or @operator.

Example

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -2856,7 +2856,7 @@
 op_f(x)
 
 julia> bar(2.0)
-4.0
source

Nonnegatives

JuMP.NonnegativesType
Nonnegatives()

The JuMP equivalent of the MOI.Nonnegatives set, in which the dimension is inferred from the corresponding function.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2])
 2-element Vector{VariableRef}:
@@ -2871,7 +2871,7 @@
 julia> b = [5, 6];
 
 julia> @constraint(model, A * x >= b)
-[x[1] + 2 x[2] - 5, 3 x[1] + 4 x[2] - 6] ∈ Nonnegatives()
source

Nonpositives

JuMP.NonpositivesType
Nonpositives()

The JuMP equivalent of the MOI.Nonpositives set, in which the dimension is inferred from the corresponding function.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2])
 2-element Vector{VariableRef}:
@@ -2886,14 +2886,14 @@
 julia> b = [5, 6];
 
 julia> @constraint(model, A * x <= b)
-[x[1] + 2 x[2] - 5, 3 x[1] + 4 x[2] - 6] ∈ Nonpositives()
source

OptimizationSense

JuMP.OptimizationSenseType
OptimizationSense

An enum for the value of the ObjectiveSense attribute.

Values

Possible values are:

  • MIN_SENSE: the goal is to minimize the objective function
  • MAX_SENSE: the goal is to maximize the objective function
  • FEASIBILITY_SENSE: the model does not have an objective function
source
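
A minimal sketch (not from the upstream docstring) of querying and setting the sense with objective_sense and set_objective_sense:

julia> model = Model();

julia> objective_sense(model)
FEASIBILITY_SENSE::OptimizationSense = 2

julia> set_objective_sense(model, MAX_SENSE)

julia> objective_sense(model)
MAX_SENSE::OptimizationSense = 1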

OptimizeNotCalled

JuMP.OptimizeNotCalledType
struct OptimizeNotCalled <: Exception end

An error thrown when a result attribute cannot be queried before optimize! is called.

Example

julia> import Ipopt
 
 julia> model = Model(Ipopt.Optimizer);
 
 julia> objective_value(model)
 ERROR: OptimizeNotCalled()
 Stacktrace:
-[...]
source

PSDCone

JuMP.PSDConeType
PSDCone

Positive semidefinite cone object that can be used to constrain a square matrix to be positive semidefinite in the @constraint macro.

If the matrix has type Symmetric then the columns vectorization (the vector obtained by concatenating the columns) of its upper triangular part is constrained to belong to the MOI.PositiveSemidefiniteConeTriangle set, otherwise its column vectorization is constrained to belong to the MOI.PositiveSemidefiniteConeSquare set.

Example

Non-symmetric case:

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -2934,7 +2934,7 @@
  x - 4
 
 julia> moi_set(constraint_object(cref))
-MathOptInterface.PositiveSemidefiniteConeTriangle(2)
source

Parameter

JuMP.ParameterType
Parameter(value)

A short-cut for the MOI.Parameter set.

Example

julia> model = Model();
 
 julia> @variable(model, x in Parameter(2))
 x
@@ -2942,7 +2942,7 @@
 julia> print(model)
 Feasibility
 Subject to
- x ∈ MathOptInterface.Parameter{Float64}(2.0)
source

ParameterRef

JuMP.ParameterRefFunction
ParameterRef(x::GenericVariableRef)

Return a constraint reference to the constraint constraining x to be a parameter.

Errors if one does not exist.

See also is_parameter, set_parameter_value, parameter_value.

Example

julia> model = Model();
 
 julia> @variable(model, p in Parameter(2))
 p
@@ -2955,7 +2955,7 @@
 julia> ParameterRef(x)
 ERROR: Variable x is not a parameter.
 Stacktrace:
-[...]
source

QuadExpr

JuMP.QuadExprType
QuadExpr

An alias for GenericQuadExpr{Float64,VariableRef}, the specific GenericQuadExpr used by JuMP.

source

QuadTermIterator

JuMP.QuadTermIteratorType
QuadTermIterator{GQE<:GenericQuadExpr}

A struct that implements the iterate protocol in order to iterate over tuples of (coefficient, variable, variable) in the GenericQuadExpr.

source
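
For illustration (not from the upstream docstring), quad_terms returns this iterator; a minimal sketch:

julia> model = Model();

julia> @variable(model, x[1:2]);

julia> expr = 2.0 * x[1] * x[2] + x[1] * x[1];

julia> for (coefficient, var_1, var_2) in quad_terms(expr)
           println(coefficient, " * ", var_1, " * ", var_2)
       end
2.0 * x[1] * x[2]
1.0 * x[1] * x[1]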

ReferenceMap

JuMP.ReferenceMapType
GenericReferenceMap{T}

Mapping between the variable and constraint references of a model and its copy. The reference in the copied model can be obtained by indexing the map with the corresponding reference of the original model.

source

ResultStatusCode

JuMP.ResultStatusCodeType
ResultStatusCode

An Enum of possible values for the PrimalStatus and DualStatus attributes.

The values indicate how to interpret the result vector.

Values

Possible values are:

  • NO_SOLUTION: the result vector is empty.
  • FEASIBLE_POINT: the result vector is a feasible point.
  • NEARLY_FEASIBLE_POINT: the result vector is feasible if some constraint tolerances are relaxed.
  • INFEASIBLE_POINT: the result vector is an infeasible point.
  • INFEASIBILITY_CERTIFICATE: the result vector is an infeasibility certificate. If the PrimalStatus is INFEASIBILITY_CERTIFICATE, then the primal result vector is a certificate of dual infeasibility. If the DualStatus is INFEASIBILITY_CERTIFICATE, then the dual result vector is a proof of primal infeasibility.
  • NEARLY_INFEASIBILITY_CERTIFICATE: the result satisfies a relaxed criterion for a certificate of infeasibility.
  • REDUCTION_CERTIFICATE: the result vector is an ill-posed certificate; see this article for details. If the PrimalStatus is REDUCTION_CERTIFICATE, then the primal result vector is a proof that the dual problem is ill-posed. If the DualStatus is REDUCTION_CERTIFICATE, then the dual result vector is a proof that the primal is ill-posed.
  • NEARLY_REDUCTION_CERTIFICATE: the result satisfies a relaxed criterion for an ill-posed certificate.
  • UNKNOWN_RESULT_STATUS: the result vector contains a solution with an unknown interpretation.
  • OTHER_RESULT_STATUS: the result vector contains a solution with an interpretation not covered by one of the statuses defined above
source
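
A minimal sketch (not from the upstream docstring), assuming a model that has not yet been optimized, so primal_status reports NO_SOLUTION:

julia> model = Model();

julia> primal_status(model)
NO_SOLUTION::ResultStatusCode = 0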

RotatedSecondOrderCone

JuMP.RotatedSecondOrderConeType
RotatedSecondOrderCone

Rotated second order cone object that can be used to constrain the square of the euclidean norm of a vector x to be less than or equal to $2tu$ where t and u are nonnegative scalars. This is a shortcut for the MOI.RotatedSecondOrderCone.

Example

The following constrains $\|(x-1, x-2)\|^2_2 \le 2tx$ and $t, x \ge 0$:

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -2964,7 +2964,7 @@
 t
 
 julia> @constraint(model, [t, x, x-1, x-2] in RotatedSecondOrderCone())
-[t, x, x - 1, x - 2] ∈ MathOptInterface.RotatedSecondOrderCone(4)
source

SOS1

JuMP.SOS1Type
SOS1(weights = Real[])

The SOS1 (Special Ordered Set of Type 1) set constrains a vector x to the set where at most one variable can take a non-zero value, and all other elements are zero.

The weights vector, if specified, induces an ordering of the variables; as such, it should contain unique values. The weights vector must have the same number of elements as the vector x, and the element weights[i] corresponds to element x[i]. If not provided, the weights vector defaults to weights[i] = i.

This is a shortcut for the MOI.SOS1 set.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:3] in SOS1([4.1, 3.2, 5.0]))
 3-element Vector{VariableRef}:
@@ -2975,7 +2975,7 @@
 julia> print(model)
 Feasibility
 Subject to
- [x[1], x[2], x[3]] ∈ MathOptInterface.SOS1{Float64}([4.1, 3.2, 5.0])
source

SOS2

JuMP.SOS2Type
SOS2(weights = Real[])

The SOS2 (Special Ordered Set of Type 2) set constrains a vector x to the set where at most two variables can take a non-zero value, and all other elements are zero. In addition, the two non-zero values must be consecutive given the ordering of the x vector induced by weights.

The weights vector, if specified, induces an ordering of the variables; as such, it must contain unique values. The weights vector must have the same number of elements as the vector x, and the element weights[i] corresponds to element x[i]. If not provided, the weights vector defaults to weights[i] = i.

This is a shortcut for the MOI.SOS2 set.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:3] in SOS2([4.1, 3.2, 5.0]))
 3-element Vector{VariableRef}:
@@ -2986,7 +2986,7 @@
 julia> print(model)
 Feasibility
 Subject to
- [x[1], x[2], x[3]] ∈ MathOptInterface.SOS2{Float64}([4.1, 3.2, 5.0])
source

ScalarConstraint

JuMP.ScalarConstraintType
struct ScalarConstraint

The data for a scalar constraint.

See also the documentation on JuMP's representation of constraints for more background.

Fields

  • .func: field contains a JuMP object representing the function
  • .set: field contains the MOI set

Example

A scalar constraint:

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -3003,14 +3003,14 @@
 2 x
 
 julia> object.set
-MathOptInterface.LessThan{Float64}(1.0)
source

ScalarShape

JuMP.ScalarShapeType
ScalarShape()

An AbstractShape that represents scalar constraints.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2]);
 
 julia> c = @constraint(model, x[2] <= 1);
 
 julia> shape(constraint_object(c))
-ScalarShape()
source

ScalarVariable

JuMP.ScalarVariableType
ScalarVariable{S,T,U,V} <: AbstractVariable

A struct used when adding variables.

See also: add_variable.

source

SecondOrderCone

JuMP.SecondOrderConeType
SecondOrderCone

Second order cone object that can be used to constrain the euclidean norm of a vector x to be less than or equal to a nonnegative scalar t. This is a shortcut for the MOI.SecondOrderCone.

Example

The following constrains $\|(x-1, x-2)\|_2 \le t$ and $t \ge 0$:

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -3019,7 +3019,7 @@
 t
 
 julia> @constraint(model, [t, x-1, x-2] in SecondOrderCone())
-[t, x - 1, x - 2] ∈ MathOptInterface.SecondOrderCone(3)
source

Semicontinuous

JuMP.SemicontinuousType
Semicontinuous(lower, upper)

A short-cut for the MOI.Semicontinuous set.

This short-cut is useful because it automatically promotes lower and upper to the same type, and converts them into the element type supported by the JuMP model.

Example

julia> model = Model();
 
 julia> @variable(model, x in Semicontinuous(1, 2))
 x
@@ -3027,7 +3027,7 @@
 julia> print(model)
 Feasibility
 Subject to
- x ∈ MathOptInterface.Semicontinuous{Int64}(1, 2)
source

Semiinteger

JuMP.SemiintegerType
Semiinteger(lower, upper)

A short-cut for the MOI.Semiinteger set.

This short-cut is useful because it automatically promotes lower and upper to the same type, and converts them into the element type supported by the JuMP model.

Example

julia> model = Model();
 
 julia> @variable(model, x in Semiinteger(3, 5))
 x
@@ -3035,12 +3035,12 @@
 julia> print(model)
 Feasibility
 Subject to
- x ∈ MathOptInterface.Semiinteger{Int64}(3, 5)
source

SensitivityReport

JuMP.SensitivityReportType
SensitivityReport

See lp_sensitivity_report.

source

SkewSymmetricMatrixShape

JuMP.SkewSymmetricMatrixShapeType
SkewSymmetricMatrixShape

Shape object for a skew symmetric square matrix of side_dimension rows and columns. The vectorized form contains the entries of the upper-right triangular part of the matrix (without the diagonal) given column by column (or equivalently, the entries of the lower-left triangular part given row by row). The diagonal is zero.

source

SkewSymmetricMatrixSpace

JuMP.SkewSymmetricMatrixSpaceType
SkewSymmetricMatrixSpace()

Use in the @variable macro to constrain a matrix of variables to be skew-symmetric.

Example

julia> model = Model();
 
 julia> @variable(model, Q[1:2, 1:2] in SkewSymmetricMatrixSpace())
 2×2 Matrix{AffExpr}:
  0        Q[1,2]
- -Q[1,2]  0
source

SkipModelConvertScalarSetWrapper

JuMP.SkipModelConvertScalarSetWrapperType
SkipModelConvertScalarSetWrapper(set::MOI.AbstractScalarSet)

JuMP uses model_convert to automatically promote MOI.AbstractScalarSet sets to the same value_type as the model.

In cases where this is undesirable, wrap the set in SkipModelConvertScalarSetWrapper to pass the set unchanged to the solver.

Warning

This struct is intended for use internally by JuMP extensions. You should not need to use it in regular JuMP code.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -3048,15 +3048,15 @@
 x = 0.5
 
 julia> @constraint(model, x in SkipModelConvertScalarSetWrapper(MOI.EqualTo(1 // 2)))
-x = 1//2
source

SquareMatrixShape

JuMP.SquareMatrixShapeType
SquareMatrixShape

Shape object for a square matrix of side_dimension rows and columns. The vectorized form contains the entries of the matrix given column by column (or equivalently, the entries of the lower-left triangular part given row by row).

source

SymmetricMatrixAdjointShape

JuMP.SymmetricMatrixAdjointShapeType
SymmetricMatrixAdjointShape(side_dimension)

The dual_shape of SymmetricMatrixShape.

This shape is not intended for regular use.

source

SymmetricMatrixShape

JuMP.SymmetricMatrixShapeType
SymmetricMatrixShape(
     side_dimension::Int;
     needs_adjoint_dual::Bool = false,
-)

The shape object for a symmetric square matrix of side_dimension rows and columns.

The vectorized form contains the entries of the upper-right triangular part of the matrix given column by column (or equivalently, the entries of the lower-left triangular part given row by row).

needs_adjoint_dual

By default, the dual_shape of SymmetricMatrixShape is also SymmetricMatrixShape. This is true for cases such as a LinearAlgebra.Symmetric matrix in PSDCone.

However, JuMP also supports LinearAlgebra.Symmetric matrix in Zeros, which is interpreted as an element-wise equality constraint. By exploiting symmetry, we pass only the upper triangle of the equality constraints. This works for the primal, but it leads to a factor of 2 difference in the off-diagonal dual elements. (The dual value of the (i, j) element in the triangle formulation should be divided by 2 when spread across the (i, j) and (j, i) elements in the square matrix formulation.) If the constraint has this dual inconsistency, set needs_adjoint_dual = true.

source

SymmetricMatrixSpace

JuMP.SymmetricMatrixSpaceType
SymmetricMatrixSpace()

Use in the @variable macro to constrain a matrix of variables to be symmetric.

Example

julia> model = Model();
 
 julia> @variable(model, Q[1:2, 1:2] in SymmetricMatrixSpace())
 2×2 LinearAlgebra.Symmetric{VariableRef, Matrix{VariableRef}}:
  Q[1,1]  Q[1,2]
- Q[1,2]  Q[2,2]
source

TerminationStatusCode

JuMP.TerminationStatusCodeType
TerminationStatusCode

An Enum of possible values for the TerminationStatus attribute. This attribute is meant to explain the reason why the optimizer stopped executing in the most recent call to optimize!.

Values

Possible values are:

  • OPTIMIZE_NOT_CALLED: The algorithm has not started.
  • OPTIMAL: The algorithm found a globally optimal solution.
  • INFEASIBLE: The algorithm concluded that no feasible solution exists.
  • DUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem. If, additionally, a feasible (primal) solution is known to exist, this status typically implies that the problem is unbounded, with some technical exceptions.
  • LOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, could not find directions for improvement, or otherwise completed its search without global guarantees.
  • LOCALLY_INFEASIBLE: The algorithm converged to an infeasible point or otherwise completed its search without finding a feasible solution, without guarantees that no feasible solution exists.
  • INFEASIBLE_OR_UNBOUNDED: The algorithm stopped because it decided that the problem is infeasible or unbounded; this occasionally happens during MIP presolve.
  • ALMOST_OPTIMAL: The algorithm found a globally optimal solution to relaxed tolerances.
  • ALMOST_INFEASIBLE: The algorithm concluded that no feasible solution exists within relaxed tolerances.
  • ALMOST_DUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem within relaxed tolerances.
  • ALMOST_LOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, or could not find directions for improvement within relaxed tolerances.
  • ITERATION_LIMIT: An iterative algorithm stopped after conducting the maximum number of iterations.
  • TIME_LIMIT: The algorithm stopped after a user-specified computation time.
  • NODE_LIMIT: A branch-and-bound algorithm stopped because it explored a maximum number of nodes in the branch-and-bound tree.
  • SOLUTION_LIMIT: The algorithm stopped because it found the required number of solutions. This is often used in MIPs to get the solver to return the first feasible solution it encounters.
  • MEMORY_LIMIT: The algorithm stopped because it ran out of memory.
  • OBJECTIVE_LIMIT: The algorithm stopped because it found a solution better than a minimum limit set by the user.
  • NORM_LIMIT: The algorithm stopped because the norm of an iterate became too large.
  • OTHER_LIMIT: The algorithm stopped due to a limit not covered by one of the _LIMIT_ statuses above.
  • SLOW_PROGRESS: The algorithm stopped because it was unable to continue making progress towards the solution.
  • NUMERICAL_ERROR: The algorithm stopped because it encountered unrecoverable numerical error.
  • INVALID_MODEL: The algorithm stopped because the model is invalid.
  • INVALID_OPTION: The algorithm stopped because it was provided an invalid option.
  • INTERRUPTED: The algorithm stopped because of an interrupt signal.
  • OTHER_ERROR: The algorithm stopped because of an error not covered by one of the statuses defined above.
source
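
A minimal sketch (not from the upstream docstring): before optimize! is called, termination_status reports OPTIMIZE_NOT_CALLED.

julia> model = Model();

julia> termination_status(model)
OPTIMIZE_NOT_CALLED::TerminationStatusCode = 0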

UnorderedPair

JuMP.UnorderedPairType
UnorderedPair(a::T, b::T)

A wrapper type used by GenericQuadExpr with fields .a and .b.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2]);
 
@@ -3065,12 +3065,12 @@
 
 julia> expr.terms
 OrderedCollections.OrderedDict{UnorderedPair{VariableRef}, Float64} with 1 entry:
-  UnorderedPair{VariableRef}(x[1], x[2]) => 2.0
source

UpperBoundRef

JuMP.UpperBoundRefFunction
UpperBoundRef(v::GenericVariableRef)

Return a constraint reference to the upper bound constraint of v.

Errors if one does not exist.

See also has_upper_bound, upper_bound, set_upper_bound, delete_upper_bound.

Example

julia> model = Model();
 
 julia> @variable(model, x <= 1.0);
 
 julia> UpperBoundRef(x)
-x ≤ 1
source

VariableConstrainedOnCreation

JuMP.VariableConstrainedOnCreationType
VariableConstrainedOnCreation <: AbstractVariable

Variable scalar_variables constrained to belong to set.

Adding this variable can be understood as doing:

function JuMP.add_variable(
     model::GenericModel,
     variable::VariableConstrainedOnCreation,
     names,
@@ -3078,9 +3078,9 @@
     var_ref = add_variable(model, variable.scalar_variable, name)
     add_constraint(model, VectorConstraint(var_ref, variable.set))
     return var_ref
-end

but adds the variables with MOI.add_constrained_variable(model, variable.set) instead.

source

VariableInfo

JuMP.VariableInfoType
VariableInfo{S,T,U,V}

A struct used by JuMP internally when creating variables. This may also be used by JuMP extensions to create new types of variables.

See also: ScalarVariable.

source

VariableNotOwned

JuMP.VariableNotOwnedType
struct VariableNotOwned{V<:AbstractVariableRef} <: Exception
     variable::V
-end

The variable variable was used in a model different to owner_model(variable).

source
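
A minimal sketch (not from the upstream docstring) of triggering this exception by using a variable in a model that does not own it; the exact error text depends on the JuMP version:

julia> model_1 = Model();

julia> @variable(model_1, x);

julia> model_2 = Model();

julia> @objective(model_2, Min, x)
ERROR: VariableNotOwned{VariableRef}(x)
Stacktrace:
[...]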

VariableRef

JuMP.VariableRefType
GenericVariableRef{T} <: AbstractVariableRef

Holds a reference to the model and the corresponding MOI.VariableIndex.

source

VariablesConstrainedOnCreation

JuMP.VariablesConstrainedOnCreationType
VariablesConstrainedOnCreation <: AbstractVariable

Vector of variables scalar_variables constrained to belong to set. Adding this variable can be thought of as doing:

function JuMP.add_variable(
     model::GenericModel,
     variable::VariablesConstrainedOnCreation,
     names,
@@ -3089,7 +3089,7 @@
     var_refs = add_variable.(model, variable.scalar_variables, v_names)
     add_constraint(model, VectorConstraint(var_refs, variable.set))
     return reshape_vector(var_refs, variable.shape)
-end

but adds the variables with MOI.add_constrained_variables(model, variable.set) instead. See the MOI documentation for the difference between adding the variables with MOI.add_constrained_variables and adding them with MOI.add_variables and adding the constraint separately.

source

VectorConstraint

JuMP.VectorConstraintType
struct VectorConstraint

The data for a vector constraint.

See also the documentation on JuMP's representation of constraints.

Fields

  • func: field contains a JuMP object representing the function
  • set: field contains the MOI set.
  • shape: field contains an AbstractShape matching the form in which the constraint was constructed (for example, by using matrices or flat vectors).

Example

julia> model = Model();
 
 julia> @variable(model, x[1:3]);
 
@@ -3112,14 +3112,14 @@
 MathOptInterface.SecondOrderCone(3)
 
 julia> object.shape
-VectorShape()
source

VectorShape

JuMP.VectorShapeType
VectorShape()

An AbstractShape that represents vector-valued constraints.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2]);
 
 julia> c = @constraint(model, x in SOS1());
 
 julia> shape(constraint_object(c))
-VectorShape()
source

Zeros

JuMP.ZerosType
Zeros()

The JuMP equivalent of the MOI.Zeros set, in which the dimension is inferred from the corresponding function.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2])
 2-element Vector{VariableRef}:
@@ -3134,7 +3134,7 @@
 julia> b = [5, 6];
 
 julia> @constraint(model, A * x == b)
-[x[1] + 2 x[2] - 5, 3 x[1] + 4 x[2] - 6] ∈ Zeros()
source

ALMOST_DUAL_INFEASIBLE

JuMP.ALMOST_DUAL_INFEASIBLEConstant
ALMOST_DUAL_INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ALMOST_DUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem within relaxed tolerances.

source

ALMOST_INFEASIBLE

JuMP.ALMOST_INFEASIBLEConstant
ALMOST_INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ALMOST_INFEASIBLE: The algorithm concluded that no feasible solution exists within relaxed tolerances.

source

ALMOST_LOCALLY_SOLVED

JuMP.ALMOST_LOCALLY_SOLVEDConstant
ALMOST_LOCALLY_SOLVED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ALMOST_LOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, or could not find directions for improvement within relaxed tolerances.

source

ALMOST_OPTIMAL

JuMP.ALMOST_OPTIMALConstant
ALMOST_OPTIMAL::TerminationStatusCode

ALMOST_DUAL_INFEASIBLE

JuMP.ALMOST_DUAL_INFEASIBLEConstant
ALMOST_DUAL_INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ALMOST_DUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem within relaxed tolerances.

source

ALMOST_INFEASIBLE

JuMP.ALMOST_INFEASIBLEConstant
ALMOST_INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ALMOST_INFEASIBLE: The algorithm concluded that no feasible solution exists within relaxed tolerances.

source

ALMOST_LOCALLY_SOLVED

JuMP.ALMOST_LOCALLY_SOLVEDConstant
ALMOST_LOCALLY_SOLVED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ALMOST_LOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, or could not find directions for improvement within relaxed tolerances.

source

ALMOST_OPTIMAL

JuMP.ALMOST_OPTIMALConstant
ALMOST_OPTIMAL::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ALMOST_OPTIMAL: The algorithm found a globally optimal solution to relaxed tolerances.

source

AUTOMATIC

JuMP.AUTOMATICConstant

moi_backend field holds a CachingOptimizer in AUTOMATIC mode.

source

DIRECT

JuMP.DIRECTConstant

moi_backend field holds an AbstractOptimizer. No extra copy of the model is stored. The moi_backend must support add_constraint etc.

source

DUAL_INFEASIBLE

JuMP.DUAL_INFEASIBLEConstant
DUAL_INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

DUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem. If, additionally, a feasible (primal) solution is known to exist, this status typically implies that the problem is unbounded, with some technical exceptions.

source

FEASIBILITY_SENSE

JuMP.FEASIBILITY_SENSEConstant
FEASIBILITY_SENSE::OptimizationSense

An instance of the OptimizationSense enum.

FEASIBILITY_SENSE: the model does not have an objective function

source

FEASIBLE_POINT

JuMP.FEASIBLE_POINTConstant
FEASIBLE_POINT::ResultStatusCode

An instance of the ResultStatusCode enum.

FEASIBLE_POINT: the result vector is a feasible point.

source

INFEASIBILITY_CERTIFICATE

JuMP.INFEASIBILITY_CERTIFICATEConstant
INFEASIBILITY_CERTIFICATE::ResultStatusCode

An instance of the ResultStatusCode enum.

INFEASIBILITY_CERTIFICATE: the result vector is an infeasibility certificate. If the PrimalStatus is INFEASIBILITY_CERTIFICATE, then the primal result vector is a certificate of dual infeasibility. If the DualStatus is INFEASIBILITY_CERTIFICATE, then the dual result vector is a proof of primal infeasibility.

source

INFEASIBLE

JuMP.INFEASIBLEConstant
INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

INFEASIBLE: The algorithm concluded that no feasible solution exists.

source

INFEASIBLE_OR_UNBOUNDED

JuMP.INFEASIBLE_OR_UNBOUNDEDConstant
INFEASIBLE_OR_UNBOUNDED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

INFEASIBLE_OR_UNBOUNDED: The algorithm stopped because it decided that the problem is infeasible or unbounded; this occasionally happens during MIP presolve.

source

INFEASIBLE_POINT

JuMP.INFEASIBLE_POINTConstant
INFEASIBLE_POINT::ResultStatusCode

An instance of the ResultStatusCode enum.

INFEASIBLE_POINT: the result vector is an infeasible point.

source

INTERRUPTED

JuMP.INTERRUPTEDConstant
INTERRUPTED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

INTERRUPTED: The algorithm stopped because of an interrupt signal.

source

INVALID_MODEL

JuMP.INVALID_MODELConstant
INVALID_MODEL::TerminationStatusCode

An instance of the TerminationStatusCode enum.

INVALID_MODEL: The algorithm stopped because the model is invalid.

source

INVALID_OPTION

JuMP.INVALID_OPTIONConstant
INVALID_OPTION::TerminationStatusCode

An instance of the TerminationStatusCode enum.

INVALID_OPTION: The algorithm stopped because it was provided an invalid option.

source

ITERATION_LIMIT

JuMP.ITERATION_LIMITConstant
ITERATION_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ITERATION_LIMIT: An iterative algorithm stopped after conducting the maximum number of iterations.

source

LOCALLY_INFEASIBLE

JuMP.LOCALLY_INFEASIBLEConstant
LOCALLY_INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

LOCALLY_INFEASIBLE: The algorithm converged to an infeasible point or otherwise completed its search without finding a feasible solution, without guarantees that no feasible solution exists.

source

LOCALLY_SOLVED

JuMP.LOCALLY_SOLVEDConstant
LOCALLY_SOLVED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

LOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, could not find directions for improvement, or otherwise completed its search without global guarantees.

source

MANUAL

JuMP.MANUALConstant

moi_backend field holds a CachingOptimizer in MANUAL mode.

source

MAX_SENSE

JuMP.MAX_SENSEConstant
MAX_SENSE::OptimizationSense

An instance of the OptimizationSense enum.

MAX_SENSE: the goal is to maximize the objective function

source

MEMORY_LIMIT

JuMP.MEMORY_LIMITConstant
MEMORY_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

MEMORY_LIMIT: The algorithm stopped because it ran out of memory.

source

MIN_SENSE

JuMP.MIN_SENSEConstant
MIN_SENSE::OptimizationSense

An instance of the OptimizationSense enum.

MIN_SENSE: the goal is to minimize the objective function

source

NEARLY_FEASIBLE_POINT

JuMP.NEARLY_FEASIBLE_POINTConstant
NEARLY_FEASIBLE_POINT::ResultStatusCode

An instance of the ResultStatusCode enum.

NEARLY_FEASIBLE_POINT: the result vector is feasible if some constraint tolerances are relaxed.

source

NEARLY_INFEASIBILITY_CERTIFICATE

JuMP.NEARLY_INFEASIBILITY_CERTIFICATEConstant
NEARLY_INFEASIBILITY_CERTIFICATE::ResultStatusCode

An instance of the ResultStatusCode enum.

NEARLY_INFEASIBILITY_CERTIFICATE: the result satisfies a relaxed criterion for a certificate of infeasibility.

source

NEARLY_REDUCTION_CERTIFICATE

JuMP.NEARLY_REDUCTION_CERTIFICATEConstant
NEARLY_REDUCTION_CERTIFICATE::ResultStatusCode

An instance of the ResultStatusCode enum.

NEARLY_REDUCTION_CERTIFICATE: the result satisfies a relaxed criterion for an ill-posed certificate.

source

NODE_LIMIT

JuMP.NODE_LIMITConstant
NODE_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

NODE_LIMIT: A branch-and-bound algorithm stopped because it explored a maximum number of nodes in the branch-and-bound tree.

source

NORM_LIMIT

JuMP.NORM_LIMITConstant
NORM_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

NORM_LIMIT: The algorithm stopped because the norm of an iterate became too large.

source

NO_SOLUTION

JuMP.NO_SOLUTIONConstant
NO_SOLUTION::ResultStatusCode

An instance of the ResultStatusCode enum.

NO_SOLUTION: the result vector is empty.

source

NUMERICAL_ERROR

JuMP.NUMERICAL_ERRORConstant
NUMERICAL_ERROR::TerminationStatusCode

An instance of the TerminationStatusCode enum.

NUMERICAL_ERROR: The algorithm stopped because it encountered unrecoverable numerical error.

source

OBJECTIVE_LIMIT

JuMP.OBJECTIVE_LIMITConstant
OBJECTIVE_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

OBJECTIVE_LIMIT: The algorithm stopped because it found a solution better than a minimum limit set by the user.

source

OPTIMAL

JuMP.OPTIMALConstant
OPTIMAL::TerminationStatusCode

An instance of the TerminationStatusCode enum.

OPTIMAL: The algorithm found a globally optimal solution.

source

OPTIMIZE_NOT_CALLED

JuMP.OPTIMIZE_NOT_CALLEDConstant
OPTIMIZE_NOT_CALLED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

OPTIMIZE_NOT_CALLED: The algorithm has not started.

source

OTHER_ERROR

JuMP.OTHER_ERRORConstant
OTHER_ERROR::TerminationStatusCode

An instance of the TerminationStatusCode enum.

OTHER_ERROR: The algorithm stopped because of an error not covered by one of the statuses defined above.

source

OTHER_LIMIT

JuMP.OTHER_LIMITConstant
OTHER_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

OTHER_LIMIT: The algorithm stopped due to a limit not covered by one of the _LIMIT_ statuses above.

source

OTHER_RESULT_STATUS

JuMP.OTHER_RESULT_STATUSConstant
OTHER_RESULT_STATUS::ResultStatusCode

An instance of the ResultStatusCode enum.

OTHER_RESULT_STATUS: the result vector contains a solution with an interpretation not covered by one of the statuses defined above

source

REDUCTION_CERTIFICATE

JuMP.REDUCTION_CERTIFICATEConstant
REDUCTION_CERTIFICATE::ResultStatusCode

An instance of the ResultStatusCode enum.

REDUCTION_CERTIFICATE: the result vector is an ill-posed certificate; see this article for details. If the PrimalStatus is REDUCTION_CERTIFICATE, then the primal result vector is a proof that the dual problem is ill-posed. If the DualStatus is REDUCTION_CERTIFICATE, then the dual result vector is a proof that the primal is ill-posed.

source

SLOW_PROGRESS

JuMP.SLOW_PROGRESSConstant
SLOW_PROGRESS::TerminationStatusCode

An instance of the TerminationStatusCode enum.

SLOW_PROGRESS: The algorithm stopped because it was unable to continue making progress towards the solution.

source

SOLUTION_LIMIT

JuMP.SOLUTION_LIMITConstant
SOLUTION_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

SOLUTION_LIMIT: The algorithm stopped because it found the required number of solutions. This is often used in MIPs to get the solver to return the first feasible solution it encounters.

source

TIME_LIMIT

JuMP.TIME_LIMITConstant
TIME_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

TIME_LIMIT: The algorithm stopped after a user-specified computation time.

source

UNKNOWN_RESULT_STATUS

JuMP.UNKNOWN_RESULT_STATUSConstant
UNKNOWN_RESULT_STATUS::ResultStatusCode

An instance of the ResultStatusCode enum.

UNKNOWN_RESULT_STATUS: the result vector contains a solution with an unknown interpretation.

source
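
These constants are the values returned by status queries such as termination_status, primal_status, and dual_status. A small sketch of how they are compared against a model's status (the codes returned after solving depend on the solver and the problem):

julia> model = Model();

julia> termination_status(model) == OPTIMIZE_NOT_CALLED
true

julia> primal_status(model) == NO_SOLUTION
true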

op_and

JuMP.op_andConstant
op_and(x, y)

A function that falls back to x & y, but when called with JuMP variables or expressions, returns a GenericNonlinearExpr.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -3142,7 +3142,7 @@
 false
 
 julia> op_and(true, x)
true && x
source

op_equal_to

JuMP.op_equal_toConstant
op_equal_to(x, y)

A function that falls back to x == y, but when called with JuMP variables or expressions, returns a GenericNonlinearExpr.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -3150,7 +3150,7 @@
 true
 
 julia> op_equal_to(x, 2)
x == 2
source

op_greater_than_or_equal_to

JuMP.op_greater_than_or_equal_toConstant
op_greater_than_or_equal_to(x, y)

A function that falls back to x >= y, but when called with JuMP variables or expressions, returns a GenericNonlinearExpr.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -3158,7 +3158,7 @@
 true
 
 julia> op_greater_than_or_equal_to(x, 2)
x >= 2
source

op_less_than_or_equal_to

JuMP.op_less_than_or_equal_toConstant
op_less_than_or_equal_to(x, y)

A function that falls back to x <= y, but when called with JuMP variables or expressions, returns a GenericNonlinearExpr.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -3166,7 +3166,7 @@
 true
 
 julia> op_less_than_or_equal_to(x, 2)
x <= 2
source

op_or

JuMP.op_orConstant
op_or(x, y)

A function that falls back to x | y, but when called with JuMP variables or expressions, returns a GenericNonlinearExpr.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -3174,7 +3174,7 @@
 true
 
 julia> op_or(true, x)
true || x
source

op_strictly_greater_than

JuMP.op_strictly_greater_thanConstant
op_strictly_greater_than(x, y)

A function that falls back to x > y, but when called with JuMP variables or expressions, returns a GenericNonlinearExpr.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -3182,7 +3182,7 @@
 false
 
 julia> op_strictly_greater_than(x, 2)
x > 2
source

op_strictly_less_than

JuMP.op_strictly_less_thanConstant
op_strictly_less_than(x, y)

A function that falls back to x < y, but when called with JuMP variables or expressions, returns a GenericNonlinearExpr.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -3190,7 +3190,7 @@
 true
 
 julia> op_strictly_less_than(x, 2)
x < 2
source

Base.empty!(::GenericModel)

Base.empty!Method
empty!(model::GenericModel)::GenericModel

Empty the model, that is, remove all variables, constraints and model attributes but not optimizer attributes. Always return the argument.

Note: removes extensions data.

Example

julia> model = Model();
 
 julia> @variable(model, x[1:2]);
 
@@ -3210,7 +3210,7 @@
 Subject to
 
 julia> isempty(model)
true
source

Base.isempty(::GenericModel)

Base.isemptyMethod
isempty(model::GenericModel)

Verifies whether the model is empty, that is, whether the MOI backend is empty and whether the model is in the same state as at its creation, apart from optimizer attributes.

Example

julia> model = Model();
 
 julia> isempty(model)
 true
@@ -3218,7 +3218,7 @@
 julia> @variable(model, x[1:2]);
 
 julia> isempty(model)
false
source

Base.copy(::AbstractModel)

Base.copyMethod
copy(model::AbstractModel)

Return a copy of the model model. It is similar to copy_model except that it does not return the mapping between the references of model and its copy.

Note

Model copy is not supported in DIRECT mode, that is, when a model is constructed using the direct_model constructor instead of the Model constructor. Moreover, independently on whether an optimizer was provided at model construction, the new model will have no optimizer, that is, an optimizer will have to be provided to the new model in the optimize! call.

Example

In the following example, a model model is constructed with a variable x and a constraint cref. It is then copied into a model new_model with the new references assigned to x_new and cref_new.

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -3232,12 +3232,12 @@
 x
 
 julia> cref_new = model[:cref]
cref : x = 2
source

Base.write(::IO, ::GenericModel; ::MOI.FileFormats.FileFormat)

Base.writeMethod
Base.write(
     io::IO,
     model::GenericModel;
     format::MOI.FileFormats.FileFormat = MOI.FileFormats.FORMAT_MOF,
     kwargs...,
)

Write the JuMP model model to io in the format format.

Other kwargs are passed to the Model constructor of the chosen format.

source
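
For illustration, a minimal sketch that writes a model to an in-memory buffer; the trailing semicolon suppresses the return value, and the format keyword is only needed to select something other than the FORMAT_MOF default:

julia> model = Model();

julia> @variable(model, x >= 0);

julia> io = IOBuffer();

julia> write(io, model);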

MOI.Utilities.reset_optimizer(::GenericModel)

MathOptInterface.Utilities.reset_optimizerMethod
MOIU.reset_optimizer(model::GenericModel)

Call MOIU.reset_optimizer on the backend of model.

Cannot be called in direct mode.

source

MOI.Utilities.drop_optimizer(::GenericModel)

MathOptInterface.Utilities.drop_optimizerMethod
MOIU.drop_optimizer(model::GenericModel)

Call MOIU.drop_optimizer on the backend of model.

Cannot be called in direct mode.

source

MOI.Utilities.attach_optimizer(::GenericModel)

MathOptInterface.Utilities.attach_optimizerMethod
MOIU.attach_optimizer(model::GenericModel)

Call MOIU.attach_optimizer on the backend of model.

Cannot be called in direct mode.

source
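
A minimal sketch of how these three calls are typically used, assuming the HiGHS solver package is installed:

julia> import HiGHS

julia> model = Model(HiGHS.Optimizer);

julia> @variable(model, x >= 0);

julia> MOI.Utilities.attach_optimizer(model)  # copy the cached model into the optimizer

julia> MOI.Utilities.reset_optimizer(model)   # empty the optimizer; the cache is unchanged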

@NLconstraint

JuMP.@NLconstraintMacro
@NLconstraint(model::GenericModel, expr)

Add a constraint described by the nonlinear expression expr. See also @constraint.

Compat

This macro is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling. In most cases, you can replace @NLconstraint with @constraint.

Example

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -3249,7 +3249,7 @@
 3-element Vector{NonlinearConstraintRef{ScalarShape}}:
  (sin(1.0 * x) - 1.0 / 1.0) - 0.0 ≤ 0
  (sin(2.0 * x) - 1.0 / 2.0) - 0.0 ≤ 0
 (sin(3.0 * x) - 1.0 / 3.0) - 0.0 ≤ 0
source

@NLconstraints

JuMP.@NLconstraintsMacro
@NLconstraints(model, args...)

Adds multiple nonlinear constraints to model at once, in the same fashion as the @NLconstraint macro.

The model must be the first argument, and multiple constraints can be added on multiple lines wrapped in a begin ... end block.

The macro returns a tuple containing the constraints that were defined.

Compat

This macro is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling. In most cases, you can replace @NLconstraints with @constraints.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -3265,7 +3265,7 @@
            t >= sqrt(x^2 + y^2)
            [i = 1:2], z[i] <= log(a[i])
        end)
((t - sqrt(x ^ 2.0 + y ^ 2.0)) - 0.0 ≥ 0, NonlinearConstraintRef{ScalarShape}[(z[1] - log(4.0)) - 0.0 ≤ 0, (z[2] - log(5.0)) - 0.0 ≤ 0])
source

@NLexpression

JuMP.@NLexpressionMacro
@NLexpression(args...)

Efficiently build a nonlinear expression which can then be inserted in other nonlinear constraints and the objective. See also @expression.

Compat

This macro is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling. In most cases, you can replace @NLexpression with @expression.

Example

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -3286,7 +3286,7 @@
  subexpression[4]: sin(3.0 * x)
 
 julia> my_expr_2 = @NLexpression(model, log(1 + sum(exp(my_expr_1[i]) for i in 1:2)))
subexpression[5]: log(1.0 + (exp(subexpression[2]) + exp(subexpression[3])))
source

@NLexpressions

JuMP.@NLexpressionsMacro
@NLexpressions(model, args...)

Adds multiple nonlinear expressions to model at once, in the same fashion as the @NLexpression macro.

The model must be the first argument, and multiple expressions can be added on multiple lines wrapped in a begin ... end block.

The macro returns a tuple containing the expressions that were defined.

Compat

This macro is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling. In most cases, you can replace @NLexpressions with @expressions.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
@@ -3300,7 +3300,7 @@
            my_expr, sqrt(x^2 + y^2)
            my_expr_1[i = 1:2], log(a[i]) - z[i]
        end)
(subexpression[1]: sqrt(x ^ 2.0 + y ^ 2.0), NonlinearExpression[subexpression[2]: log(4.0) - z[1], subexpression[3]: log(5.0) - z[2]])
source

@NLobjective

JuMP.@NLobjectiveMacro
@NLobjective(model, sense, expression)

Add a nonlinear objective to model with optimization sense sense. sense must be Max or Min.

Compat

This macro is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling. In most cases, you can replace @NLobjective with @objective.

Example

julia> model = Model();
 
 julia> @variable(model, x)
 x
@@ -3309,7 +3309,7 @@
 
 julia> print(model)
 Max 2.0 * x + 1.0 + sin(x)
Subject to
source

@NLparameter

JuMP.@NLparameterMacro
@NLparameter(model, param == value)

Create and return a nonlinear parameter param attached to the model model with initial value set to value. Nonlinear parameters may be used only in nonlinear expressions.

Example

julia> model = Model();
 
 julia> @NLparameter(model, x == 10)
 x == 10.0
@@ -3339,7 +3339,7 @@
  parameter[3] == 6.0
 
 julia> value(y[2])
4.0
source

@NLparameters

JuMP.@NLparametersMacro
 @NLparameters(model, args...)

Create and return multiple nonlinear parameters attached to model model, in the same fashion as @NLparameter macro.

The model must be the first argument, and multiple parameters can be added on multiple lines wrapped in a begin ... end block. Distinct parameters need to be placed on separate lines as in the following example.

The macro returns a tuple containing the parameters that were defined.

Compat

This macro is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling. In most cases, you can replace a call like

@NLparameters(model, begin
     p == value
 end)

with

@variables(model, begin
     p in Parameter(value)
@@ -3351,17 +3351,17 @@
        end);
 
 julia> value(x)
10.0
source

add_nonlinear_constraint

JuMP.add_nonlinear_constraintFunction
add_nonlinear_constraint(model::Model, expr::Expr)

Add a nonlinear constraint described by the Julia expression ex to model.

This function is most useful if the expression ex is generated programmatically, and you cannot use @NLconstraint.

Compat

This function is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling.

Notes

  • You must interpolate the variables directly into the expression expr.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
 julia> add_nonlinear_constraint(model, :($(x) + $(x)^2 <= 1))
(x + x ^ 2.0) - 1.0 ≤ 0
source

add_nonlinear_expression

JuMP.add_nonlinear_expressionFunction
add_nonlinear_expression(model::Model, expr::Expr)

Add a nonlinear expression expr to model.

This function is most useful if the expression expr is generated programmatically, and you cannot use @NLexpression.

Compat

This function is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling.

Notes

  • You must interpolate the variables directly into the expression expr.

Example

julia> model = Model();
 
 julia> @variable(model, x);
 
 julia> add_nonlinear_expression(model, :($(x) + $(x)^2))
subexpression[1]: x + x ^ 2.0
source

add_nonlinear_parameter

JuMP.add_nonlinear_parameterFunction
add_nonlinear_parameter(model::Model, value::Real)

Add an anonymous parameter to the model.

Compat

This function is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling.

source
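
For illustration, a minimal sketch of the legacy API:

julia> model = Model();

julia> p = add_nonlinear_parameter(model, 1.0);

julia> value(p)
1.0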

all_nonlinear_constraints

JuMP.all_nonlinear_constraintsFunction
all_nonlinear_constraints(model::GenericModel)

Return a vector of all nonlinear constraint references in the model in the order they were added to the model.

Compat

This function is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling.

This function returns only the constraints added with @NLconstraint and add_nonlinear_constraint. It does not return GenericNonlinearExpr constraints.

source
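
A small usage sketch:

julia> model = Model();

julia> @variable(model, x);

julia> @NLconstraint(model, x^2 <= 1);

julia> length(all_nonlinear_constraints(model))
1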

get_optimizer_attribute

JuMP.get_optimizer_attributeFunction
get_optimizer_attribute(
     model::Union{GenericModel,MOI.OptimizerWithAttributes},
     attr::Union{AbstractString,MOI.AbstractOptimizerAttribute},
 )

Return the value associated with the solver-specific attribute attr.

If attr is an AbstractString, this is equivalent to get_optimizer_attribute(model, MOI.RawOptimizerAttribute(name)).

Compat

This method will remain in all v1.X releases of JuMP, but it may be removed in a future v2.0 release. We recommend using get_attribute instead.

See also: set_optimizer_attribute, set_optimizer_attributes.

Example

julia> import Ipopt
@@ -3369,18 +3369,18 @@
 julia> model = Model(Ipopt.Optimizer);
 
 julia> get_optimizer_attribute(model, MOI.Silent())
false
source

nonlinear_constraint_string

JuMP.nonlinear_constraint_stringFunction
nonlinear_constraint_string(
     model::GenericModel,
     mode::MIME,
     c::_NonlinearConstraint,
)

Return a string representation of the nonlinear constraint c belonging to model, given the mode.

Compat

This function is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling.

source

nonlinear_dual_start_value

JuMP.nonlinear_dual_start_valueFunction
nonlinear_dual_start_value(model::Model)

Return the current value of the MOI attribute MOI.NLPBlockDualStart.

Compat

This function is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling.

source
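
For illustration, a minimal sketch that sets a dual start with set_nonlinear_dual_start_value and reads it back:

julia> model = Model();

julia> @variable(model, x);

julia> @NLconstraint(model, x^2 <= 1);

julia> set_nonlinear_dual_start_value(model, [2.0])

julia> nonlinear_dual_start_value(model)
1-element Vector{Float64}:
 2.0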

nonlinear_expr_string

JuMP.nonlinear_expr_stringFunction
nonlinear_expr_string(
     model::GenericModel,
     mode::MIME,
     c::MOI.Nonlinear.Expression,
)

Return a string representation of the nonlinear expression c belonging to model, given the mode.

Compat

This function is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling.

source

nonlinear_model

JuMP.nonlinear_modelFunction
nonlinear_model(
     model::GenericModel;
     force::Bool = false,
)::Union{MOI.Nonlinear.Model,Nothing}

If model has nonlinear components, return a MOI.Nonlinear.Model, otherwise return nothing.

If force, always return a MOI.Nonlinear.Model, and if one does not exist for the model, create an empty one.

Compat

This function is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling.

source
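
A small usage sketch:

julia> model = Model();

julia> nonlinear_model(model) === nothing
true

julia> @variable(model, x);

julia> @NLconstraint(model, x^2 <= 1);

julia> nonlinear_model(model) isa MOI.Nonlinear.Model
true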

num_nonlinear_constraints

JuMP.num_nonlinear_constraintsFunction
num_nonlinear_constraints(model::GenericModel)

Returns the number of nonlinear constraints associated with the model.

Compat

This function is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling.

This function counts only the constraints added with @NLconstraint and add_nonlinear_constraint. It does not count GenericNonlinearExpr constraints.

source
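
A small usage sketch:

julia> model = Model();

julia> @variable(model, x);

julia> @NLconstraint(model, [i = 1:2], x^i <= 1);

julia> num_nonlinear_constraints(model)
2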

register

JuMP.registerFunction
register(
     model::Model,
     op::Symbol,
     dimension::Integer,
@@ -3408,7 +3408,7 @@
 
 julia> register(model, :g, 2, g; autodiff = true)
 
julia> @NLobjective(model, Min, g(x[1], x[2]))
source
register(
     model::Model,
     s::Symbol,
     dimension::Integer,
@@ -3447,7 +3447,7 @@
 
 julia> register(model, :g, 2, g, ∇g)
 
julia> @NLobjective(model, Min, g(x[1], x[2]))
source
register(
     model::Model,
     s::Symbol,
     dimension::Integer,
@@ -3471,7 +3471,7 @@
 julia> register(model, :foo, 1, f, ∇f, ∇²f)
 
 julia> @NLobjective(model, Min, foo(x))
source

set_nonlinear_dual_start_value

JuMP.set_nonlinear_dual_start_valueFunction
set_nonlinear_dual_start_value(
     model::Model,
     start::Union{Nothing,Vector{Float64}},
 )

Set the value of the MOI attribute MOI.NLPBlockDualStart.

Compat

This function is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling.

The start vector corresponds to the Lagrangian duals of the nonlinear constraints, in the order given by all_nonlinear_constraints. That is, you must pass a single start vector corresponding to all of the nonlinear constraints in a single function call; you cannot set the dual start value of nonlinear constraints one-by-one. The example below demonstrates how to use all_nonlinear_constraints to create a mapping between the nonlinear constraint references and the start vector.

Pass nothing to unset a previous start.

Example

julia> model = Model();
@@ -3494,7 +3494,7 @@
 julia> nonlinear_dual_start_value(model)
 2-element Vector{Float64}:
  -1.0
  1.0
source

set_nonlinear_objective

JuMP.set_nonlinear_objectiveFunction
set_nonlinear_objective(
     model::Model,
     sense::MOI.OptimizationSense,
     expr::Expr,
@@ -3502,17 +3502,17 @@
 
 julia> @variable(model, x);
 
julia> set_nonlinear_objective(model, MIN_SENSE, :($(x) + $(x)^2))
source

set_normalized_coefficients

JuMP.set_normalized_coefficientsFunction
set_normalized_coefficients(
     constraint::ConstraintRef{<:AbstractModel,<:MOI.ConstraintIndex{F}},
     variable::AbstractVariableRef,
     new_coefficients::Vector{Tuple{Int64,T}},
) where {T,F<:Union{MOI.VectorAffineFunction{T},MOI.VectorQuadraticFunction{T}}}

A deprecated method that now redirects to set_normalized_coefficient.

source

set_optimizer_attribute

JuMP.set_optimizer_attributeFunction
set_optimizer_attribute(
     model::Union{GenericModel,MOI.OptimizerWithAttributes},
     attr::Union{AbstractString,MOI.AbstractOptimizerAttribute},
     value,
 )

Set the solver-specific attribute attr in model to value.

If attr is an AbstractString, this is equivalent to set_optimizer_attribute(model, MOI.RawOptimizerAttribute(name), value).

Compat

This method will remain in all v1.X releases of JuMP, but it may be removed in a future v2.0 release. We recommend using set_attribute instead.

See also: set_optimizer_attributes, get_optimizer_attribute.

Example

julia> model = Model();
 
julia> set_optimizer_attribute(model, MOI.Silent(), true)
source

set_optimizer_attributes

JuMP.set_optimizer_attributesFunction
set_optimizer_attributes(
     model::Union{GenericModel,MOI.OptimizerWithAttributes},
     pairs::Pair...,
 )

Given a list of attribute => value pairs, calls set_optimizer_attribute(model, attribute, value) for each pair.

Compat

This method will remain in all v1.X releases of JuMP, but it may be removed in a future v2.0 release. We recommend using set_attributes instead.

See also: set_optimizer_attribute, get_optimizer_attribute.

Example

julia> import Ipopt
@@ -3525,7 +3525,7 @@
 
 julia> set_optimizer_attribute(model, "tol", 1e-4)
 
julia> set_optimizer_attribute(model, "max_iter", 100)
source

set_value

JuMP.set_valueFunction
set_value(p::NonlinearParameter, v::Number)

Store the value v in the nonlinear parameter p.

Compat

This function is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling.

Example

julia> model = Model();
 
 julia> @NLparameter(model, p == 0)
 p == 0.0
@@ -3534,4 +3534,4 @@
 5
 
 julia> value(p)
5.0
source

NonlinearConstraintIndex

JuMP.NonlinearConstraintIndexType
ConstraintIndex

An index to a nonlinear constraint that is returned by add_constraint.

Given data::Model and c::ConstraintIndex, use data[c] to retrieve the corresponding Constraint.

source

NonlinearConstraintRef

JuMP.NonlinearConstraintRefType
NonlinearConstraintRef
Compat

This type is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling.

source

NonlinearExpression

JuMP.NonlinearExpressionType
NonlinearExpression <: AbstractJuMPScalar

A struct to represent a nonlinear expression.

Create an expression using @NLexpression.

Compat

This type is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling.

source
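
A small usage sketch:

julia> model = Model();

julia> @variable(model, x);

julia> my_expr = @NLexpression(model, sin(x));

julia> my_expr isa NonlinearExpression
true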

NonlinearParameter

JuMP.NonlinearParameterType
NonlinearParameter <: AbstractJuMPScalar

A struct to represent a nonlinear parameter.

Create a parameter using @NLparameter.

Compat

This type is part of the legacy nonlinear interface. Consider using the new nonlinear interface documented in Nonlinear Modeling.

source
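
A small usage sketch:

julia> model = Model();

julia> @NLparameter(model, p == 1.2);

julia> p isa NonlinearParameter
true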
diff --git a/dev/background/algebraic_modeling_languages/index.html b/dev/background/algebraic_modeling_languages/index.html index 4552a6e1b12..80e7dd71e23 100644 --- a/dev/background/algebraic_modeling_languages/index.html +++ b/dev/background/algebraic_modeling_languages/index.html @@ -138,4 +138,4 @@ julia> highs_knapsack([1.0, 2.0], [0.5, 0.5], 1.25) 2-element Vector{Float64}: 0.0 - 2.0

We've now gone from an algebraic model that looked identical to the mathematical model we started with, to a verbose function that uses HiGHS-specific functionality.

The difference between algebraic_knapsack and highs_knapsack highlights the benefit that algebraic modeling languages provide to users. Moreover, if we used a different solver, the solver-specific function would be entirely different. A key benefit of an algebraic modeling language is that you can change the solver without needing to rewrite the model.
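
For example, the same JuMP model can be pointed at a different optimizer with a single call, assuming both solver packages are installed:

julia> using JuMP, HiGHS, Ipopt

julia> model = Model(HiGHS.Optimizer);

julia> set_optimizer(model, Ipopt.Optimizer)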

diff --git a/dev/background/bibliography/index.html b/dev/background/bibliography/index.html index aa3b1959619..0dd943f0cd3 100644 --- a/dev/background/bibliography/index.html +++ b/dev/background/bibliography/index.html @@ -3,4 +3,4 @@ function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-0RZ8X3D3D0', {'page_path': location.pathname + location.search + location.hash}); -

Bibliography

diff --git a/dev/changelog/index.html b/dev/changelog/index.html index a0473bf306f..9f6cfd3b521 100644 --- a/dev/changelog/index.html +++ b/dev/changelog/index.html @@ -3,13 +3,13 @@ function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-0RZ8X3D3D0', {'page_path': location.pathname + location.search + location.hash}); -

Release notes

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

Version 1.23.4 (November 8, 2024)

Fixed

  • Fixed UnsupportedNonlinearOperator error for the single argument LinearAlgebra.norm (#3864)
  • Fixed printing MOI.Interval with MIME"text/latex" (#3866)

Other

  • Various minor improvements to the documentation (#3855) (#3860)
  • Added MathOptAI.jl and MathOptSymbolicAD.jl to the list of extensions in the documentation (#3858)
  • Clarified add_to_expression! can add two expressions (#3859)
  • Added SHOT to the installation table (#3853)
  • Improvements to test coverage (#3867) (#3868) (#3869) (#3870) (#3871) (#3872) (#3873) (#3874) (#3875)
  • JuMP now uses MOI.add_constrained_variable when adding a scalar variable with bounds for improving model creation performance with some solvers (#3863) (#3865)

Version 1.23.3 (October 21, 2024)

Fixed

  • Fixed a printing bug with scientific numbers in MIME"text/latex" (#3838)
  • Fixed support for AbstractString in set_attribute (#3840)
  • Fixed a bug reporting vector-valued duals in solution_summary (#3846)
  • Fixed solution_summary when there are duplicate variable and constraint names (#3848)

Other

Version 1.23.2 (September 13, 2024)

Fixed

  • Fixed an illegal simplification in MA.operate!! for NonlinearExpr (#3826)

Other

  • Added Rolling horizon problems tutorial (#3815)
  • Added more tests for shapes and dual shapes (#3816)
  • Added more packages to extension-tests.yml (#3817) (#3818)
  • Removed an unnecessary test (#3819)
  • Documentation improvements (#3820) (#3822) (#3823)
  • Added PiecewiseLinearOpt.jl to the docs (#3824)

Version 1.23.1 (August 30, 2024)

Fixed

  • Fixed a bug with indicator constraints and the in set syntax (#3813)

Other

Version 1.23.0 (August 13, 2024)

Added

  • Added set inequality syntax for matrices (#3766)
  • Improved matrix inequality support (#3778) (#3805)

Fixed

  • Fixed a method for calling value on a ::Number (#3776)
  • Fixed querying dual of Symmetric and Hermitian equality constraints (#3797)
  • Fixed read_from_file for coefficient types other than Float64 (#3801)

Other

  • Documentation improvements
    • Fixed missing character in installation instructions (#3777)
    • Added a section of querying the Jacobian (#3779)
    • Clarify that SCIP does not support lazy constraints (#3784)
    • Fixed typo in knapsack.jl (#3792)
    • Added a warning to docs about tolerances in Bin and Int variables (#3794)
    • Clarify where to type installation commands (#3795)
  • Improve error message for common incorrect syntax in constraint macro (#3781)
  • Changed show(::IO, ::GenericModel) to a more informative tree structure (#3803)

Version 1.22.2 (June 17, 2024)

Fixed

  • Fixed printing to omit terms when printing a large array of expressions (#3759)
  • Fixed bug in printing when show is called on an invalid variable or constraint (#3763)

Other

  • Improved error message for unsupported kwargs in variable macro (#3751)
  • Improved error message for unsupported container syntax like x[A][B] (#3756)
  • Docstring improvements (#3758), (#3760), (#3761), (#3767)
  • Added warning to documentation about Y <= X, Set() syntax (#3769)
  • Work-around change on nightly (#3753), (#3754)
  • Improved printing of symmetric matrices when used in constraints (#3768)
  • Fixed a test for upcoming printing change in MOI (#3772)
  • Updated should_i_use.md (#3773)

Version 1.22.1 (May 17, 2024)

Fixed

  • Fixed bug including non-.jl files in src/macros.jl (#3747)

Other

  • Added DSDP to the list of supported solvers (#3745)
  • Updated YALMIP migration guide (#3748)

Version 1.22.0 (May 12, 2024)

Added

  • Added Base.complex(r, i) where r and i may be real-valued variables or affine or quadratic expressions (#3734)
  • Added @force_nonlinear for controlling when affine and quadratic expressions are instead parsed as nonlinear expressions. This can be useful for advanced users in a limited set of circumstances. (#3732)
  • Added support for returning the variable coefficients of a vector-valued constraint via normalized_coefficient. In addition, set_normalized_coefficients has been softly deprecated (no warning is thrown and old code will still work for all future 1.X releases of JuMP) in favor of set_normalized_coefficient. This change was made to unify how we get and set variable coefficients. (#3743)
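
The first two additions can be sketched as follows; this is a hedged example, and the exact printed types depend on the JuMP version:

    using JuMP
    model = Model()
    @variable(model, x)
    @variable(model, y)
    z = complex(x, y)                                    # the complex-valued affine expression x + y*im
    w = @expression(model, @force_nonlinear(2.0 * x))    # parsed as a NonlinearExpr instead of an AffExpr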

Fixed

  • Fixed missing promote_operation method that resulted in slow code (#3730)
  • Improved performance of getindex for Containers.DenseAxisArray (#3731)
  • Fixed the error message when the legacy nonlinear API is mixed with the new nonlinear API. In particular, we now uniformly throw an error message when unexpected objects occur in nonlinear expressions. (#3741)

Other

  • Updated documentation (#3727), (#3728), (#3739)
  • Updated versions in GitHub actions (#3735)

Version 1.21.1 (April 11, 2024)

Fixed

  • Fixed behavior of complex-value related functions like real, imag, conj and abs2 when called on GenericNonlinearExpr. This fixes a method error when calling x' where x is an array of nonlinear expressions. As a related consequence, we now always error when creating nonlinear expressions with complex components. Previously, only some constructors were checked for complex expressions. (#3724)

Other

  • Documentation improvements (#3719) (#3720) (#3721) (#3722)

Version 1.21.0 (March 31, 2024)

Added

  • Added support for matrix inequality constraints with the HermitianPSDCone (#3705)
  • Added batched modification methods for set_normalized_rhs, set_objective_coefficient and set_normalized_coefficient. Using these methods can be more efficient for some solvers (#3716)
  • Added the private constant _CONSTRAINT_LIMIT_FOR_PRINTING, which controls how many constraints are printed to the screen during print(model). The main purpose of this is to prevent large quantities of text being printed when print(model) is accidentally called on a large model. (#3686)

Fixed

  • Changed Containers.SparseAxisArray to use an OrderedDict as the backing data structure. Iterating over the elements in a SparseAxisArray now iterates in the order that the elements were created. Previously, the order was undefined behavior. (#3681)
  • Fixed complex variables for non-Float64 coefficient types (#3691)
  • Fixed LinearAlgebra.hermitian(::AbstractJuMPScalar) (#3693)
  • Fixed multiplying real scalar by Hermitian matrix (#3695)

Other

Version 1.20.0 (February 15, 2024)

Added

Fixed

  • Fixed compat of DimensionalData (#3666)
  • Fixed convert(::Type{NonlinearExpr}, ::Number) (#3672)

Other

  • Added Optim to list of solvers (#3624)
  • Improved linking within documentation (#3669)

Version 1.19.0 (February 1, 2024)

Added

  • Added support for modifying quadratic coefficients (#3658)

Fixed

  • Fixed short circuiting of && and || in macros (#3655)

Other

  • Added SDPLR to list of solvers (#3644)
  • Added new roadmap items (#3645)
  • Fixed vale.sh version (#3650)
  • Improve error messages in macros (#3653)
  • Refactoring of set_normalized_coefficient (#3660) (#3661)
  • Update docs/packages.toml (#3662)

Version 1.18.1 (January 6, 2024)

Fixed

  • Fixed escaping of the set keyword in @variable (#3647)

Version 1.18.0 (January 2, 2024)

Added

  • This release includes a large refactoring of the macro code that closes a roadmap item (#3629) Contributing pull requests include (#3600), (#3603), (#3606), (#3607), (#3610), (#3611), (#3612), (#3613), (#3614), (#3615), (#3617), (#3618), (#3619), (#3620), (#3621), (#3631), (#3632), (#3633)

Fixed

  • Fixed error for unsupported objective sense (#3601)
  • Fixed text/latex printing of GenericNonlinearExpr (#3609)
  • Fixed compat bounds of stdlib packages (#3626)
  • Fixed a bug that can accidentally modify the user's expressions in a macro (#3639)
  • Fixed a bug converting AffExpr to GenericNonlinearExpr (#3642)

Other

  • Added DisjunctiveProgramming to extension-tests (#3597)
  • Added DisjunctiveProgramming to docs (#3598)
  • Added DocumenterCitations to the docs (#3596), (#3630)
  • Migrate from SnoopPrecompile to PrecompileTools (#3608)
  • Minor documentation updates (#3623), (#3628), (#3635), (#3640), (#3643)

Version 1.17.0 (December 4, 2023)

Added

Fixed

  • Fixed variable_ref_type for unsupported types (#3556)
  • Fixed convert type of constraint starting values (#3571)
  • Fixed various methods to support AbstractJuMPScalar with Distances.jl (#3583)
  • Fixed eachindex for multiple arguments of Containers.DenseAxisArray and Containers.SparseAxisArray (#3587)
  • Expressions with more than 60 terms now print in truncated form. This prevents large expressions from being accidentally printed to terminal or IJulia output (#3575)
  • Fixed a type instability in set_objective_coefficient (#3590)
  • Various fixes to the documentation (#3593) (#3595)

Other

  • Improved error messages for:
    • Addition and subtraction between a matrix and a scalar (#3557) (#3558)
    • Variables with non-constant bounds (#3583)
    • Invalid indicator constraints (#3584)
  • Added new solvers to the documentation:
    • EAGO.jl (#3560) (#3561)
    • Manopt.jl (#3568)
    • Percival.jl (#3567)
  • Added new tutorials:
  • Improved documentation for:
  • Updated package compat bounds (#3578)

Version 1.16.0 (October 24, 2023)

Added

  • Added := operator for Boolean satisfiability problems (#3530)

Fixed

  • Fixed text/latex printing of MOI.Interval sets (#3537)
  • Fixed tests with duplicate function names (#3539)

Other

  • Updated documentation list of supported solvers (#3527) (#3529) (#3538) (#3542) (#3545) (#3546)
  • Updated to Documenter@1.1 (#3528)
  • Fixed various tutorials (#3534) (#3532)
  • Fixed Project.toml compat bounds for standard libraries (#3544)

Version 1.15.1 (September 24, 2023)

Fixed

  • Fixed support for single argument min and max operators (#3522)
  • Fixed error message for add_to_expression! when called with a GenericNonlinearExpr (#3506)
  • Fixed constraint tags with broadcasted constraints (#3515)
  • Fixed MethodError in MA.scaling (#3518)
  • Fixed support for arrays of Parameter variables (#3524)

Other

  • Updated to Documenter@1 (#3501)
  • Fixed links to data in tutorials (#3512)
  • Fixed typo in TSP tutorial (#3516)
  • Improved error message for VariableNotOwned errors (#3520)
  • Fixed various JET errors (#3519)

Version 1.15.0 (September 15, 2023)

This is a large minor release because it adds an entirely new data structure and API path for working with nonlinear programs. The previous nonlinear interface remains unchanged and is documented at Nonlinear Modeling (Legacy). The new interface is treated as a non-breaking feature addition and is documented at Nonlinear Modeling.

Breaking

Although the new nonlinear interface is a feature addition, there are two changes which might be breaking for a very small number of users.

  • The syntax inside JuMP macros is parsed using a different code path, even for linear and quadratic expressions. We made this change to unify how we parse linear, quadratic, and nonlinear expressions. In all cases, the new code returns equivalent expressions, but because of the different order of operations, there are three changes to be aware of when updating:
    • The printed form of the expression may change, for example from x * y to y * x. This can cause tests which test the String representation of a model to fail.
    • Some coefficients may change slightly due to floating point round-off error.
    • Particularly when working with a JuMP extension, you may encounter a MethodError due to a missing or ambiguous method. These errors are due to previously existing bugs that were not triggered by the previous parsing code. If you encounter such an error, please open a GitHub issue.
  • The methods for Base.:^(x::VariableRef, n::Integer) and Base.:^(x::AffExpr, n::Integer) have changed. Previously, these methods supported only n = 0, 1, 2 and they always returned a QuadExpr, even for the case when n = 0 or n = 1. Now:
    • x^0 returns one(T), where T is the value_type of the model (defaults to Float64)
    • x^1 returns x
    • x^2 returns a QuadExpr
    • x^n where !(0 <= n <= 2) returns a NonlinearExpr.
    We made this change to support nonlinear expressions and to align the mathematical definition of the operation with its return type. (Previously, users were surprised that x^1 returned a QuadExpr.) As a consequence of this change, the methods are now not type-stable. This means that the compiler cannot prove that x^2 returns a QuadExpr. If benchmarking shows that this is a performance problem, you can use the type-stable x * x instead of x^2.
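
A minimal sketch of the new behavior (the comments restate the rules above):

    using JuMP
    model = Model()
    @variable(model, x)
    x^0   # 1.0, that is, one(Float64), because the model's value_type is Float64
    x^1   # x itself
    x^2   # a QuadExpr
    x^3   # a NonlinearExpr, because the exponent is outside 0:2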

Added

Fixed

  • Fixed uses of @nospecialize which cause precompilation failures in Julia v1.6.0 and v1.6.1. (#3464)
  • Fixed adding a container of Parameter (#3473)
  • Fixed return type of x^0 and x^1 to no longer return QuadExpr (see note in Breaking section above) (#3474)
  • Fixed error messages in LowerBoundRef, UpperBoundRef, FixRef, IntegerRef, BinaryRef, ParameterRef and related functions (#3494)
  • Fixed type inference of empty containers in JuMP macros (#3500)

Other

  • Added GAMS to solver documentation (#3357)
  • Updated various tutorials (#3459) (#3460) (#3462) (#3463) (#3465) (#3490) (#3492) (#3503)
  • Added The network multi-commodity flow problem tutorial (#3491)
  • Added Two-stage stochastic programs tutorial (#3466)
  • Added better error messages for unsupported operations in LinearAlgebra (#3476)
  • Updated to the latest version of Documenter (#3484) (#3495) (#3497)
  • Updated GitHub action versions (#3507)

Version 1.14.1 (September 2, 2023)

Fixed

  • Fix links in Documentation (#3478)

Version 1.14.0 (August 27, 2023)

Added

Fixed

  • Fixed model_convert for BridgeableConstraint (#3437)
  • Fixed printing models with integer coefficients larger than typemax(Int) (#3447)
  • Fixed support for constant left-hand side functions in a complementarity constraint (#3452)

Other

  • Updated packages used in documentation (#3444) (#3455)
  • Fixed docstring tests (#3445)
  • Fixed printing change for MathOptInterface (#3446)
  • Fixed typos in documentation (#3448) (#3457)
  • Added SCIP to callback documentation (#3449)

Version 1.13.0 (July 27, 2023)

Added

Fixed

Other

  • Added Loraine.jl to the installation table (#3426)
  • Removed Penopt.jl from packages.toml (#3428)
  • Improved problem statement in cannery example of tutorial (#3430)
  • Minor cleanups in Containers.DenseAxisArray implementation (#3429)
  • Changed nested_problems.jl: outer/inner to upper/lower (#3433)
  • Removed second SDP relaxation in OPF tutorial (#3432)

Version 1.12.0 (June 19, 2023)

Added

Fixed

  • Fixed error message for matrix in HermitianPSDCone (#3369)
  • Fixed EditURL for custom documentation pages (#3373)
  • Fixed return type annotations for MOI.ConstraintPrimal and MOI.ConstraintDual (#3381)
  • Fixed printing change in Julia nightly (#3391)
  • Fixed printing of Complex coefficients (#3397)
  • Fixed printing of constraints in text/latex mode (#3405)
  • Fixed performance issue in Containers.rowtable (#3410)
  • Fixed bug when variables added to set of wrong dimension (#3411)

Other

  • Added more solver READMEs to the documentation (#3358) (#3360) (#3364) (#3365) (#3366) (#3368) (#3372) (#3374) (#3376) (#3379) (#3387) (#3389)
  • Added StatusSwitchingQP.jl to the installation table (#3354)
  • Updated checklist for adding a new solver (#3370)
  • Updated extension-tests.yml action (#3371) (#3375)
  • Color logs in GitHub actions (#3392)
  • Added new tutorials
  • Updated JuMP paper citation (#3400)
  • Changed GitHub action to upload LaTeX logs when building documentation (#3403)
  • Fixed printing of SCS log in documentation (#3406)
  • Updated solver versions (#3407)
  • Updated documentation to use Julia v1.9 (#3398)
  • Replaced _value_type with MOI.Utilities.value_type (#3414)
  • Fixed a typo in docstring (#3415)
  • Refactored API documentation (#3386)
  • Updated SCIP license (#3420)

Version 1.11.1 (May 19, 2023)

Fixed

  • Fixed a poor error message when sum(::DenseAxisArray; dims) was called (#3338)
  • Fixed support for dependent sets in the @variable macro (#3344)
  • Fixed a performance bug in constraints with sparse symmetric matrices (#3349)

Other

  • Improved the printing of complex numbers (#3332)
  • When printing, sets which contain constants ending in .0 now print as integers. This follows the behavior of constants in functions (#3341)
  • Added InfiniteOpt to the extensions documentation (#3343)
  • Added more documentation for the exponential cone (#3345) (#3347)
  • Added checklists for developers (#3346) (#3355)
  • Fixed test support for upcoming Julia nightly (#3351)
  • Fixed extension-tests.yml action (#3353)
  • Add more solvers to the documentation (#3359) (#3361) (#3362)

Version 1.11.0 (May 3, 2023)

Added

  • Added new methods to print_active_bridges for printing a particular objective, constraint, or variable (#3316)

Fixed

  • Fixed tests for MOI v1.14.0 release (#3312)
  • Fixed indexing containers when an axis is Vector{Any} that contains a Vector{Any} element (#3280)
  • Fixed getindex(::AbstractJuMPScalar) which is called for an expression like x[] (#3314)
  • Fixed bug in set_string_names_on_creation with a vector of variables (#3322)
  • Fixed bug in memoize function in nonlinear documentation (#3337)

Other

  • Fixed typos in the documentation (#3317) (#3318) (#3328)
  • Added a test for the order of setting start values (#3315)
  • Added READMEs of solvers and extensions to the docs (#3309) (#3320) (#3327) (#3329) (#3333)
  • Style improvements to src/variables.jl (#3324)
  • Clarify that column generation does not find global optimum (#3325)
  • Add a GitHub actions workflow for testing extensions prior to release (#3331)
  • Document the release process for JuMP (#3334)
  • Fix links to discourse and chatroom (#3335)

Version 1.10.0 (April 3, 2023)

Added

  • Added Nonnegatives, Nonpositives and Zeros, and support for vector-valued inequality syntax in the JuMP macros (#3273)
  • Added special support for LinearAlgebra.Symmetric and LinearAlgebra.Hermitian matrices in Zeros constraints (#3281) (#3296)
  • Added HermitianMatrixSpace and the Hermitian tag for generating a matrix of variables that is Hermitian (#3292) (#3293)
  • Added Semicontinuous and Semiinteger (#3302)
  • Added support for keyword indexing of containers (#3237)
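
A small, hedged sketch of the keyword indexing and one of the new vector sets; the container axes used here are illustrative:

    using JuMP
    model = Model()
    @variable(model, x[i = 2:3, j = [:a, :b]])
    x[2, :a]            # positional indexing, as before
    x[i = 2, j = :a]    # keyword indexing, added in this release
    @constraint(model, [x[2, :a], x[3, :b]] in Nonnegatives())   # one of the new vector sets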

Fixed

  • Fixed [compat] bound for MathOptInterface in Project.toml (#3272)

Other

  • Split out the Nested optimization problems tutorial (#3274)
  • Updated doctests to ensure none have hidden state (#3275) (#3276)
  • Clarified how lazy constraints may revisit points (#3278)
  • Added P-Norm example (#3282)
  • Clarified docs that macros create new bindings (#3284)
  • Fixed threading example (#3283)
  • Added plot to The minimum distortion problem (#3288)
  • Added Google style rules for Vale and fixed warnings (#3285)
  • Added citation for the JuMP 1.0 paper (#3294)
  • Updated package versions in the documentation (#3298)
  • Added comment for the order in which start values must be set (#3303)
  • Improved error message for unrecognized constraint operators (#3311)

Version 1.9.0 (March 7, 2023)

Added

Fixed

  • The matrix returned by a variable in HermitianPSDCone is now a LinearAlgebra.Hermitian matrix. This is potentially breaking if you have written code to assume the return is a Matrix. (#3245) (#3246)
  • Fixed missing support for Base.isreal of expressions (#3252)

Other

  • Fixed a thread safety issue in the Parallelism tutorial (#3240) (#3243)
  • Improved the error message when unsupported operators are used in @NL macros (#3236)
  • Clarified the documentation to say that matrices in HermitianPSDCone must be LinearAlgebra.Hermitian (#3241)
  • Minor style fixes to internal macro code (#3247)
  • Add Example: quantum state discrimination tutorial (#3250)
  • Improve error message when begin...end not passed to plural macros (#3255)
  • Document how to register function with varying number of input arguments (#3258)
  • Tidy tests by removing unneeded JuMP. prefixes (#3260)
  • Clarified the introduction to the Complex number support tutorial (#3262)
  • Fixed typos in the Documentation (#3263) (#3266) (#3268) (#3269)

Version 1.8.2 (February 27, 2023)

Fixed

  • Fixed dot product between complex JuMP expression and number (#3244)

Other

  • Polish simple SDP examples (#3232)

Version 1.8.1 (February 23, 2023)

Fixed

  • Fixed support for init in nonlinear generator expressions (#3226)

Other

  • Use and document import MathOptInterface as MOI (#3222)
  • Removed references in documentation to multiobjective optimization being unsupported (#3223)
  • Added tutorial on multi-objective portfolio optimization (#3227)
  • Refactored some of the conic tutorials (#3229)
  • Fixed typos in the documentation (#3230)
  • Added tutorial on parallelism (#3231)

Version 1.8.0 (February 16, 2023)

Added

  • Added --> syntax support for indicator constraints. The old syntax of => remains supported (#3207)
  • Added <--> syntax for reified constraints. For now, few solvers support reified constraints (#3206)
  • Added fix_discrete_variables. This is most useful for computing the dual of a mixed-integer program (#3208)
  • Added support for vector-valued objectives. For details, see the Multi-objective knapsack tutorial (#3176)
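
A hedged sketch of the two new constraint syntaxes above; the bound of 5 is arbitrary:

    using JuMP
    model = Model()
    @variable(model, x)
    @variable(model, b, Bin)
    @constraint(model, b --> {x <= 5})    # indicator: if b == 1 then x <= 5 (the old b => {x <= 5} still works)
    @constraint(model, b <--> {x <= 5})   # reified: b == 1 if and only if x <= 5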

Fixed

  • Fixed a bug in lp_sensitivity_report by switching to an explicit LU factorization of the basis matrix (#3182)
  • Fixed a bug that prevented [; kwarg] arguments in macros (#3220)

Other

Version 1.7.0 (January 25, 2023)

Added

  • Added support for view of a Containers.DenseAxisArray (#3152) (#3180)
  • Added support for containers of variables in ComplexPlane (#3184)
  • Added support for minimum and maximum generators in nonlinear expressions (#3189)
  • Added SnoopPrecompile statements that reduce the time-to-first-solve in Julia 1.9 (#3193) (#3195) (#3196) (#3197)

Other

  • Large refactoring of the tests (#3166) (#3167) (#3168) (#3169) (#3170) (#3171)
  • Remove unreachable code due to VERSION checks (#3172)
  • Document how to test JuMP extensions (#3174)
  • Fix method ambiguities in Containers (#3173)
  • Improve error message that is thrown when = is used instead of == in the @constraint macro (#3178)
  • Improve the error message when Bool is used instead of Bin in the @variable macro (#3180)
  • Update versions of the documentation (#3185)
  • Tidy the import of packages and remove unnecessary prefixes (#3186) (#3187)
  • Refactor src/JuMP.jl by moving methods into more relevant files (#3188)
  • Fix docstring of Model not appearing in the documentation (#3198)

Version 1.6.0 (January 1, 2023)

Added

Fixed

  • Fixed promotion of complex expressions (#3150) (#3164)

Other

  • Added Benders tutorial with in-place resolves (#3145)
  • Added more Tips and tricks for linear programs (#3144) (#3163)
  • Clarified documentation that start can depend on the indices of a variable container (#3148)
  • Replace instances of length and size by the recommended eachindex and axes (#3149)
  • Added a warning explaining why the model is dirty when accessing solution results from a modified model (#3156)
  • Clarify documentation that PSD ensures a symmetric matrix (#3159)
  • Maintenance of the JuMP test suite (#3146) (#3158) (#3162)

Version 1.5.0 (December 8, 2022)

Added

Fixed

  • Fixed error message for vectorized interval constraints (#3123)
  • Fixed passing AbstractString to set_optimizer_attribute (#3127)

Other

  • Update package versions used in docs (#3119) (#3133) (#3139)
  • Fixed output of diet tutorial (#3120)
  • Explain how to use Dates.period in set_time_limit_sec (#3121)
  • Update to JuliaFormatter v1.0.15 (#3130)
  • Fixed HTTP server example in web_app.jl (#3131)
  • Update docs to build with Documenter#master (#3094)
  • Add tests for LinearAlgebra operations (#3132)
  • Tidy these release notes (#3135)
  • Added documentation for Complex number support (#3141)
  • Removed the "workforce scheduling" and "steelT3" tutorials (#3143)

Version 1.4.0 (October 29, 2022)

Added

Fixed

  • Fixed a bug in copy_to(dest::Model, src::MOI.ModelLike) when src has nonlinear components (#3101)
  • Fixed the printing of (-1.0 + 0.0im) coefficients in complex expressions (#3112)
  • Fixed a parsing bug in nonlinear expressions with generator statements that contain multiple for statements (#3116)

Other

  • Converted the multi-commodity flow tutorial to use an SQLite database (#3098)
  • Fixed a number of typos in the documentation (#3103) (#3107) (#3018)
  • Improved various style aspects of the PDF documentation (#3095) (#3098) (#3102)

Version 1.3.1 (September 28, 2022)

Fixed

  • Fixed a performance issue in relax_integrality (#3087)
  • Fixed the type stability of operators with Complex arguments (#3072)
  • Fixed a bug which added additional +() terms to some nonlinear expressions (#3091)
  • Fixed potential method ambiguities with AffExpr and QuadExpr objects (#3092)

Other

  • Added vale as a linter for the documentation (#3080)
  • Added a tutorial on debugging JuMP models (#3043)
  • Fixed a number of typos in the documentation (#3079) (#3083)
  • Many other small tweaks to the documentation (#3068) (#3073) (#3074) (#3075) (#3076) (#3077) (#3078) (#3081) (#3082) (#3084) (#3085) (#3089)

Version 1.3.0 (September 5, 2022)

Added

  • Support slicing in SparseAxisArray (#3031)

Fixed

  • Fixed a bug introduced in v1.2.0 that prevented DenseAxisArrays with Vector keys (#3064)

Other

  • Released the JuMP logos under the CC BY 4.0 license (#3063)
  • Minor tweaks to the documentation (#3054) (#3056) (#3057) (#3060) (#3061) (#3065)
  • Improved code coverage of a number of files (#3048) (#3049) (#3050) (#3051) (#3052) (#3053) (#3058) (#3059)

Version 1.2.1 (August 22, 2022)

Fixed

  • Fixed a bug when parsing two-sided nonlinear constraints (#3045)

Version 1.2.0 (August 16, 2022)

Breaking

This is a large minor release because it significantly refactors the internal code for handling nonlinear programs to use the MathOptInterface.Nonlinear submodule that was introduced in MathOptInterface v1.3.0. As a consequence, the internal data structure in model.nlp_data has been removed, as has the JuMP._Derivatives submodule. Despite the changes, the public API for nonlinear programming has not changed, and any code that uses only the public API and that worked with v1.1.1 will continue to work with v1.2.0.

Added

  • Added all_constraints(model; include_variable_in_set_constraints) which simplifies returning a list of all constraint indices in the model.
  • Added the ability to delete nonlinear constraints via delete(::Model, ::NonlinearConstraintRef).
  • Added the ability to provide an explicit Hessian for a multivariate user-defined function.
  • Added support for querying the primal value of a nonlinear constraint via value(::NonlinearConstraintRef)
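
A hedged sketch of the first two additions, using the legacy @NLconstraint API that this release targets; the variable bounds and constraint are illustrative:

    using JuMP
    model = Model()
    @variable(model, x >= 1)
    @NLconstraint(model, c, x^3 <= 8)
    all_constraints(model; include_variable_in_set_constraints = true)   # also lists the x >= 1 constraint
    delete(model, c)                                                     # nonlinear constraints can now be deleted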

Fixed

  • Fixed a bug in Containers.DenseAxisArray so that it now supports indexing with keys that hash to the same value, even if they are different types, for example, Int32 and Int64.
  • Fixed a bug printing the model when the solver does not support MOI.Name.

Other

  • Added a constraint programming formulation to the Sudoku tutorial.
  • Added newly supported solvers Pajarito, Clarabel, and COPT to the installation table.
  • Fixed a variety of other miscellaneous issues in the documentation.

Version 1.1.1 (June 14, 2022)

Other

  • Fixed problem displaying LaTeX in the documentation
  • Minor updates to the style guide
  • Updated to MOI v1.4.0 in the documentation

Version 1.1.0 (May 25, 2022)

Added

  • Added num_constraints(::Model; count_variable_in_set_constraints) to simplify the process of counting the number of constraints in a model
  • Added VariableRef(::ConstraintRef) for querying the variable associated with a bound or integrality constraint.
  • Added set_normalized_coefficients for modifying the variable coefficients of a vector-valued constraint.
  • Added set_string_names_on_creation to disable creating String names for variables and constraints. This can improve performance.
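
A hedged sketch combining the additions above; the variable and its bounds are illustrative:

    using JuMP
    model = Model()
    set_string_names_on_creation(model, false)    # skip creating String names; can improve performance
    @variable(model, x >= 0, Int)
    num_constraints(model; count_variable_in_set_constraints = true)   # counts x >= 0 and the integrality constraint
    VariableRef(LowerBoundRef(x)) == x            # true: recover the variable behind its bound constraint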

Fixed

  • Fixed a bug passing nothing to the start keyword of @variable

Other

  • New tutorials:
    • Sensitivity analysis of a linear program
    • Serving web apps
  • Minimal ellipse SDP tutorial refactored and improved
  • Docs updated to the latest version of each package
  • Lots of minor fixes and improvements to the documentation

Version 1.0.0 (March 24, 2022)

Read more about this release, along with an acknowledgement of all the contributors in our JuMP 1.0.0 is released blog post.

Breaking

  • The previously deprecated functions (v0.23.0, v0.23.1) have been removed. Deprecation was to improve consistency of function names:
    • num_nl_constraints (see num_nonlinear_constraints)
    • all_nl_constraints (see all_nonlinear_constraints)
    • add_NL_expression (see add_nonlinear_expression)
    • set_NL_objective (see set_nonlinear_objective)
    • add_NL_constraint (see add_nonlinear_constraint)
    • nl_expr_string (see nonlinear_expr_string)
    • nl_constraint_string (see nonlinear_constraint_string)
    • SymMatrixSpace (see SymmetricMatrixSpace)
  • The unintentionally exported variable JuMP.op_hint has been renamed to the unexported JuMP._OP_HINT

Fixed

  • Fixed a bug writing .nl files
  • Fixed a bug broadcasting SparseAxisArrays

Version 0.23.2 (March 14, 2022)

Added

  • Added relative_gap to solution_summary
  • register now throws an informative error if the function is not differentiable using ForwardDiff. In some cases, the check in register will encounter a false negative, and the informative error will be thrown at run-time. This usually happens when the function is non-differentiable in a subset of the domain.

Fixed

  • Fixed a scoping issue when extending the container keyword of containers

Other

  • Docs updated to the latest version of each package

Version 0.23.1 (March 2, 2022)

Deprecated

  • nl_expr_string and nl_constraint_string have been renamed to nonlinear_expr_string and nonlinear_constraint_string. The old methods still exist with deprecation warnings. This change should impact very few users because to call them you must rely on private internals of the nonlinear API. Users are encouraged to use sprint(show, x) instead, where x is the nonlinear expression or constraint of interest.

Added

  • Added support for Base.abs2(x) where x is a variable or affine expression. This is mainly useful for complex-valued constraints.

Fixed

  • Fixed addition of complex and real affine expressions
  • Fixed arithmetic for Complex-valued quadratic expressions
  • Fixed variable bounds passed as Rational{Int}(Inf)
  • Fixed printing of the coefficient (0 + 1im)
  • Fixed a bug when solution_summary is called prior to optimize!

Version 0.23.0 (February 25, 2022)

JuMP v0.23.0 is a breaking release. It is also a release-candidate for JuMP v1.0.0. That is, if no issues are found with the v0.23.0 release, then it will be re-tagged as v1.0.0.

Breaking

  • Julia 1.6 is now the minimum supported version
  • MathOptInterface has been updated to v1.0.0
  • All previously deprecated functionality has been removed
  • PrintMode, REPLMode and IJuliaMode have been removed in favor of the MIME types MIME"text/plain" and MIME"text/latex". Replace instances of ::Type{REPLMode} with ::MIME"text/plain", REPLMode with MIME("text/plain"), ::Type{IJuliaMode} with ::MIME"text/latex", and IJuliaMode with MIME("text/latex"); see the sketch after this list.
  • Functions containing the nl_ acronym have been renamed to the more explicit nonlinear_. For example, num_nl_constraints is now num_nonlinear_constraints and set_NL_objective is now set_nonlinear_objective. Calls to the old functions throw an error explaining the new name.
  • SymMatrixSpace has been renamed to SymmetricMatrixSpace
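
A hedged before-and-after sketch of the print-mode change; constraint_string is one function that takes a print mode, and other call sites follow the same pattern:

    using JuMP
    model = Model()
    @variable(model, x)
    @constraint(model, c, 2x <= 1)
    # JuMP 0.22 and earlier:  constraint_string(REPLMode, c)
    constraint_string(MIME("text/plain"), c)      # JuMP 0.23 and later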

Added

  • Added nonlinear_dual_start_value and set_nonlinear_dual_start_value
  • Added preliminary support for Complex coefficient types

Fixed

  • Fixed a bug in solution_summary

Other

  • MILP examples have been migrated from GLPK to HiGHS
  • Fixed various typos
  • Improved section on setting constraint start values

Troubleshooting problems when updating

If you experience problems when updating, you are likely using previously deprecated functionality. (By default, Julia does not warn when you use deprecated features.)

To find the deprecated features you are using, start Julia with --depwarn=yes:

$ julia --depwarn=yes

Then install JuMP v0.22.3:

julia> using Pkg
julia> pkg"add JuMP@0.22.3"

And then run your code. Apply any suggestions, or search the release notes below for advice on updating a specific deprecated feature.

Version 0.22.3 (February 10, 2022)

Fixed

  • Fixed a reproducibility issue in the TSP tutorial
  • Fixed a reproducibility issue in the max_cut_sdp tutorial
  • Fixed a bug broadcasting an empty SparseAxisArray

Other

  • Added a warning and improved documentation for the modify-then-query case
  • Fixed a typo in the docstring of RotatedSecondOrderCone
  • Added Aqua.jl as a check for code health
  • Added introductions to each section of the tutorials
  • Improved the column generation and Benders decomposition tutorials
  • Updated documentation to MOI v0.10.8
  • Updated JuliaFormatter to v0.22.2

Version 0.22.2 (January 10, 2022)

Added

  • The function all_nl_constraints now returns all nonlinear constraints in a model
  • start_value and set_start_value can now be used to get and set the primal start for constraint references
  • Plural macros now return a tuple containing the elements that were defined instead of nothing
  • Anonymous variables are now printed as _[i] where i is the index of the variable instead of noname. Calling name(x) still returns "" so this is non-breaking.
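
A small sketch of the new printing of anonymous variables:

    using JuMP
    model = Model()
    x = @variable(model)   # an anonymous variable
    print(x)               # prints _[1] instead of noname
    name(x)                # still returns "", so the change is non-breaking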

Fixed

  • Fixed handling of min and max in nonlinear expressions
  • CartesianIndex is no longer allowed as a key for DenseAxisArrays.

Other

  • Improved the performance of GenericAffExpr
  • Added a tutorial on the Travelling Salesperson Problem
  • Added a tutorial on querying the Hessian of a nonlinear program
  • Added documentation on using custom solver binaries.

Version 0.22.1 (November 29, 2021)

Added

  • Export OptimizationSense enum, with instances: MIN_SENSE, MAX_SENSE, and FEASIBILITY_SENSE
  • Add Base.isempty(::Model) to match Base.empty(::Model)

Fixed

  • Fix bug in container with tuples as indices
  • Fix bug in set_time_limit_sec

Other

  • Add tutorial "Design patterns for larger models"
  • Remove release notes section from PDF
  • General edits of the documentation and error messages

Version 0.22.0 (November 10, 2021)

JuMP v0.22 is a breaking release

Breaking

JuMP 0.22 contains a number of breaking changes. However, these should be invisible for the majority of users. You will mostly encounter these breaking changes if you: wrote a JuMP extension, accessed backend(model), or called @SDconstraint.

The breaking changes are as follows:

  • MathOptInterface has been updated to v0.10.4. For users who have interacted with the MOI backend, this contains a large number of breaking changes. Read the MathOptInterface release notes for more details.
  • The bridge_constraints keyword argument to Model and set_optimizer has been renamed add_bridges to reflect that more things than just constraints are bridged.
  • The backend(model) field now contains a concrete instance of a MOI.Utilities.CachingOptimizer instead of one with an abstractly typed optimizer field. In most cases, this will lead to improved performance. However, calling set_optimizer after backend invalidates the old backend. For example:
    model = Model()
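    # Hedged, illustrative continuation: the solver choice (which assumes `using HiGHS`)
    # and the name `b` are assumptions for this sketch, not part of the original example.
    b = backend(model)                      # capture the current CachingOptimizer
    set_optimizer(model, HiGHS.Optimizer)   # replaces the backend of `model`...
    # ...so `b` no longer refers to the backend of `model` and must not be reused.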
    +

    Release notes

    The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

    Version 1.23.5 (November 19, 2024)

    Fixed

    Other

    • Updated upload-artifact GitHub action (#3877)
    • Added a section on Common subexpressions (#3879)
    • Fixed a printing change for DimensionalData.jl (#3880)

    Version 1.23.4 (November 8, 2024)

    Fixed

    • Fixed UnsupportedNonlinearOperator error for the single argument LinearAlgebra.norm (#3864)
    • Fixed printing MOI.Interval with MIME"text/latex" (#3866)

    Other

    • Various minor improvements to the documentation (#3855) (#3860)
    • Added MathOptAI.jl and MathOptSymbolicAD.jl to the list of extensions in the documentation (#3858)
    • Clarified add_to_expression! can add two expressions (#3859)
    • Added SHOT to the installation table (#3853)
    • Improvements to test coverage (#3867) (#3868) (#3869) (#3870) (#3871) (#3872) (#3873) (#3874) (#3875)
    • JuMP now uses MOI.add_constrained_variable when adding a scalar variable with bounds for improving model creation performance with some solvers (#3863) (#3865)

    Version 1.23.3 (October 21, 2024)

    Fixed

    • Fixed a printing bug with scientific numbers in MIME"text/latex" (#3838)
    • Fixed support for AbstractString in set_attribute (#3840)
    • Fixed a bug reporting vector-valued duals in solution_summary (#3846)
    • Fixed solution_summary when there are duplicate variable and constraint names (#3848)

    Other

    Version 1.23.2 (September 13, 2024)

    Fixed

    • Fixed an illegal simplification in MA.operate!! for NonlinearExpr (#3826)

    Other

    • Added Rolling horizon problems tutorial (#3815)
    • Added more tests for shapes and dual shapes (#3816)
    • Added more packages to extension-tests.yml (#3817) (#3818)
    • Removed an unnecessary test(#3819)
    • Documentation improvements (#3820) (#3822) (#3823)
    • Added PiecewiseLinearOpt.jl to the docs (#3824)

    Version 1.23.1 (August 30, 2024)

    Fixed

    • Fixed a bug with indicator constraints and the in set syntax (#3813)

    Other

    Version 1.23.0 (August 13, 2024)

    Added

    • Added set inequality syntax for matrices (#3766)
    • Improved matrix inequality support (#3778) (#3805)

    Fixed

    • Fixed a method for calling value on a ::Number (#3776)
    • Fixed querying dual of Symmetric and Hermitian equality constraints (#3797)
    • Fixed read_from_file for coefficient types other than Float64 (#3801)

    Other

    • Documentation improvements
      • Fixed missing character in installation instructions (#3777)
      • Added a section of querying the Jacobian (#3779)
      • Clarify that SCIP does not support lazy constraints (#3784)
      • Fixed typo in knapsack.jl (#3792)
      • Added a warning to docs about tolerances in Bin and Int variables (#3794)
      • Clarify where to type installation commands (#3795)
    • Improve error message for common incorrect syntax in constraint macro (#3781)
    • Changed show(::IO, ::GenericModel) to a more informative tree structure (#3803)

    Version 1.22.2 (June 17, 2024)

    Fixed

    • Fixed printing to omit terms when printing a large array of expressions (#3759)
    • Fixed bug in printing when show is called on an invalid variable or constraint (#3763)

    Other

    • Improved error message for unsupported kwargs in variable macro (#3751)
    • Improved error message for unsupported container syntax like x[A][B] (#3756)
    • Docstring improvements (#3758), (#3760), (#3761), (#3767)
    • Added warning to documentation about Y <= X, Set() syntax (#3769)
    • Work-around change on nightly (#3753), (#3754)
    • Improved printing of symmetric matrices when used in constraints (#3768)
    • Fixed a test for upcoming printing change in MOI (#3772)
    • Updated should_i_use.md (#3773)

    Version 1.22.1 (May 17, 2024)

    Fixed

    • Fixed bug including non-.jl files in src/macros.jl (#3747)

    Other

    • Added DSDP to the list of supported solvers (#3745)
    • Updated YALMIP migration guide (#3748)

    Version 1.22.0 (May 12, 2024)

    Added

    • Added Base.complex(r, i) where r and i may be real-valued variables or affine or quadratic expressions (#3734)
    • Added @force_nonlinear for controlling when affine and quadratic expressions are instead parsed as nonlinear expressions. This can be useful for advanced users in a limited set of circumstances. (#3732)
    • Added support for returning the variable coefficients of a vector-valued constraint via normalized_coefficient. In addition, set_normalized_coefficients has been softly deprecated (no warning is thrown and old code will still work for all future 1.X releases of JuMP) in favor of set_normalized_coefficient. This change was made to unify how we get and set variable coefficients. (#3743)

    Fixed

    • Fixed missing promote_operation method that resulted in slow code (#3730)
    • Improved performance of getindex for Containers.DenseAxisArray (#3731)
    • Fixed the error message when the legacy nonlinear API is mixed with the new nonlinear API. In particular, we now uniformly throw an error message when unexpected objects occur in nonlinear expressions. (#3741)

    Other

    • Updated documentation (#3727), (#3728), (#3739)
    • Updated versions in GitHub actions (#3735)

    Version 1.21.1 (April 11, 2024)

    Fixed

    • Fixed behavior of complex-value related functions like real, imag, conj and abs2 when called on GenericNonlinearExpr. This fixes a method error when calling x' where x is an array of nonlinear expressions. As a related consequence, we now always error when creating nonlinear expressions with complex components. Previously, only some constructors were checked for complex expressionns. (#3724)

    Other

    • Documentation improvements (#3719) (#3720) (#3721) (#3722)

    Version 1.21.0 (March 31, 2024)

    Added

    • Added support for matrix inequality constraints with the HermitianPSDCone (#3705)
    • Added batched modification methods for set_normalized_rhs, set_objective_coefficient and set_normalized_coefficient. Using these methods can be more efficient for some solvers (#3716)
    • Added the private constant _CONSTRAINT_LIMIT_FOR_PRINTING, which controls how many constraints are printed to the screen during print(model). The main purpose of this is to prevent large quantities of text being printed when print(model) is accidentally called on a large model. (#3686)

    Fixed

    • Changed Containers.SparseAxisArray to use an OrderedDict as the backing data structure. Iterating over the elements in a SparseAxisArray now iterates in the order that the elements were created. Previously, the order was undefined behavior. (#3681)
    • Fixed complex variables for non-Float64 coefficient types (#3691)
    • Fixed LinearAlgebra.hermitan(::AbstractJuMPScalar) (#3693)
    • Fixed multiplying real scalar by Hermitian matrix (#3695)

    Other

    Version 1.20.0 (February 15, 2024)

    Added

    Fixed

    • Fixed compat of DimensionalData (#3666)
    • Fixed convert(::Type{NonlinearExpr}, ::Number)(#3672)

    Other

    • Added Optim to list of solvers (#3624)
    • Improved linking within documentation (#3669)

    Version 1.19.0 (February 1, 2024)

    Added

    • Added support for modifying quadratic coefficients (#3658)

    Fixed

    • Fixed short circuiting of && and || in macros (#3655)

    Other

    • Added SDPLR to list of solvers (#3644)
    • Added new roadmap items (#3645)
    • Fixed vale.sh version (#3650)
    • Improve error messages in macros (#3653)
    • Refactoring of set_normalized_coefficient (#3660) (#3661)
    • Update docs/packages.toml (#3662)

    Version 1.18.1 (January 6, 2024)

    Fixed

    • Fixed escaping of the set keyword in @variable (#3647)

    Version 1.18.0 (January 2, 2024)

    Added

    • This release includes a large refactoring of the macro code that closes a roadmap item (#3629) Contributing pull requests include (#3600), (#3603), (#3606), (#3607), (#3610), (#3611), (#3612), (#3613), (#3614), (#3615), (#3617), (#3618), (#3619), (#3620), (#3621), (#3631), (#3632), (#3633)

    Fixed

    • Fixed error for unsupported objective sense (#3601)
    • Fixed text/latex printing of GenericNonlinearExpr (#3609)
    • Fixed compat bounds of stdlib packages (#3626)
    • Fixed a bug that can accidentally modify the user's expressions in a macro (#3639)
    • Fixed a bug converting AffExpr to GenericNonlinearExpr (#3642)

    Other

    • Added DisjunctiveProgrammingto extension-tests (#3597)
    • Added DisjunctiveProgrammingto docs (#3598)
    • Added DocumenterCitations to the docs (#3596), (#3630)
    • Migrate from SnoopPrecompile to PrecompileTools (#3608)
    • Minor documentation updates (#3623), (#3628), (#3635), (#3640), (#3643)

    Version 1.17.0 (December 4, 2023)

    Added

    Fixed

    • Fixed variable_ref_type for unsupported types (#3556)
    • Fixed convert type of constraint starting values (#3571)
    • Fixed various methods to support AbstractJuMPScalar with Distances.jl (#3583)
    • Fixed eachindex for multiple arguments of Containers.DenseAxisArray and Containers.SparseAxisArray (#3587)
    • Expressions with more than 60 terms now print in truncated form. This prevents large expressions from being accidentally printed to terminal or IJulia output (#3575)
    • Fixed a type instability in set_objective_coefficient (#3590)
    • Various fixes to the documentation (#3593) (#3595)

    Other

    • Improved error messages for:
      • Addition and subtraction between a matrix and a scalar (#3557) (#3558)
      • Variables with non-constant bounds (#3583)
      • Invalid indicator constraints (#3584)
    • Added new solvers to the documentation:
      • EAGO.jl (#3560) (#3561)
      • Manopt.jl (#3568)
      • Percival.jl (#3567)
    • Added new tutorials:
    • Improved documentation for:
    • Updated package compat bounds (#3578)

    Version 1.16.0 (October 24, 2023)

    Added

    • Added := operator for Boolean satisfiability problems (#3530)

    Fixed

    • Fixed text/latex printing of MOI.Interval sets (#3537)
    • Fixed tests with duplicate function names (#3539)

    Other

    • Updated documentation list of supported solvers (#3527) (#3529) (#3538) (#3542) (#3545) (#3546)
    • Updated to Documenter@1.1 (#3528)
    • Fixed various tutorials (#3534) (#3532)
    • Fixed Project.toml compat bounds for standard libraries (#3544)

    Version 1.15.1 (September 24, 2023)

    Fixed

    • Fixed support for single argument min and max operators (#3522)
    • Fixed error message for add_to_expression! when called with a GenericNonlinearExpr (#3506)
    • Fixed constraint tags with broadcasted constraints (#3515)
    • Fixed MethodError in MA.scaling (#3518)
    • Fixed support for arrays of Parameter variables (#3524)

    Other

    • Updated to Documenter@1 (#3501)
    • Fixed links to data in tutorials (#3512)
    • Fixed typo in TSP tutorial (#3516)
    • Improved error message for VariableNotOwned errors (#3520)
    • Fixed various JET errors (#3519)

    Version 1.15.0 (September 15, 2023)

    This is a large minor release because it adds an entirely new data structure and API path for working with nonlinear programs. The previous nonlinear interface remains unchanged and is documented at Nonlinear Modeling (Legacy). The new interface is a treated as a non-breaking feature addition and is documented at Nonlinear Modeling.

    Breaking

    Although the new nonlinear interface is a feature addition, there are two changes which might be breaking for a very small number of users.

    • The syntax inside JuMP macros is parsed using a different code path, even for linear and quadratic expressions. We made this change to unify how we parse linear, quadratic, and nonlinear expressions. In all cases, the new code returns equivalent expressions, but because of the different order of operations, there are three changes to be aware of when updating:
      • The printed form of the expression may change, for example from x * y to y * x. This can cause tests which test the String representation of a model to fail.
      • Some coefficients may change slightly due to floating point round-off error.
      • Particularly when working with a JuMP extension, you may encounter a MethodError due to a missing or ambiguous method. These errors are due to previously existing bugs that were not triggered by the previous parsing code. If you encounter such an error, please open a GitHub issue.
    • The methods for Base.:^(x::VariableRef, n::Integer) and Base.:^(x::AffExpr, n::Integer) have changed. Previously, these methods supported only n = 0, 1, 2 and they always returned a QuadExpr, even for the case when n = 0 or n = 1. Now:
      • x^0 returns one(T), where T is the value_type of the model (defaults to Float64)
      • x^1 returns x
      • x^2 returns a QuadExpr
      • x^n where !(0 <= n <= 2) returns a NonlinearExpr.
      We made this change to support nonlinear expressions and to align the mathematical definition of the operation with their return type. (Previously, users were surprised that x^1 returned a QuadExpr.) As a consequence of this change, the methods are now not type-stable. This means that the compiler cannot prove that x^2 returns a QuadExpr. If benchmarking shows that this is a performance problem, you can use the type-stable x * x instead of x^2.

    Added

    Fixed

    • Fixed uses of @nospecialize which cause precompilation failures in Julia v1.6.0 and v1.6.1. (#3464)
    • Fixed adding a container of Parameter (#3473)
    • Fixed return type of x^0 and x^1 to no longer return QuadExpr (see note in Breaking section above) (#3474)
    • Fixed error messages in LowerBoundRef, UpperBoundRef, FixRef, IntegerRef, BinaryRef, ParameterRef and related functions (#3494)
    • Fixed type inference of empty containers in JuMP macros (#3500)

    Other

    • Added GAMS to solver documentation (#3357)
    • Updated various tutorials (#3459) (#3460) (#3462) (#3463) (#3465) (#3490) (#3492) (#3503)
    • Added The network multi-commodity flow problem tutorial (#3491)
    • Added Two-stage stochastic programs tutorial (#3466)
    • Added better error messages for unsupported operations in LinearAlgebra (#3476)
    • Updated to the latest version of Documenter (#3484) (#3495) (#3497)
    • Updated GitHub action versions (#3507)

    Version 1.14.1 (September 2, 2023)

    Fixed

    • Fix links in Documentation (#3478)

    Version 1.14.0 (August 27, 2023)

    Added

    Fixed

    • Fixed model_convert for BridgeableConstraint (#3437)
    • Fixed printing models with integer coefficients larger than typemax(Int) (#3447)
    • Fixed support for constant left-hand side functions in a complementarity constraint (#3452)

    Other

    • Updated packages used in documentation (#3444) (#3455)
    • Fixed docstring tests (#3445)
    • Fixed printing change for MathOptInterface (#3446)
    • Fixed typos in documentation (#3448) (#3457)
    • Added SCIP to callback documentation (#3449)

    Version 1.13.0 (July 27, 2023)

    Added

    Fixed

    Other

    • Added Loraine.jl to the installation table (#3426)
    • Removed Penopt.jl from packages.toml (#3428)
    • Improved problem statement in cannery example of tutorial (#3430)
    • Minor cleanups in Containers.DenseAxisArray implementation (#3429)
    • Changed nested_problems.jl: outer/inner to upper/lower (#3433)
    • Removed second SDP relaxation in OPF tutorial (#3432)

    Version 1.12.0 (June 19, 2023)

    Added

    Fixed

    • Fixed error message for matrix in HermitianPSDCone (#3369)
    • Fixed EditURL for custom documentation pages (#3373)
    • Fixed return type annotations for MOI.ConstraintPrimal and MOI.ConstraintDual (#3381)
    • Fixed printing change in Julia nightly (#3391)
    • Fixed printing of Complex coefficients (#3397)
    • Fixed printing of constraints in text/latex mode (#3405)
    • Fixed performance issue in Containers.rowtable (#3410)
    • Fixed bug when variables added to set of wrong dimension (#3411)

    Other

    • Added more solver READMEs to the documentation (#3358) (#3360) (#3364) (#3365) (#3366) (#3368) (#3372) (#3374) (#3376) (#3379) (#3387) (#3389)
    • Added StatusSwitchingQP.jl to the installation table (#3354)
    • Updated checklist for adding a new solver (#3370)
    • Updated extension-tests.yml action (#3371) (#3375)
    • Color logs in GitHub actions (#3392)
    • Added new tutorials
    • Updated JuMP paper citation (#3400)
    • Changed GitHub action to upload LaTeX logs when building documentation (#3403)
    • Fixed printing of SCS log in documentation (#3406)
    • Updated solver versions (#3407)
    • Updated documentation to use Julia v1.9 (#3398)
    • Replaced _value_type with MOI.Utilities.value_type (#3414)
    • Fixed a typo in docstring (#3415)
    • Refactored API documentation (#3386)
    • Updated SCIP license (#3420)

    Version 1.11.1 (May 19, 2023)

    Fixed

    • Fixed a poor error message when sum(::DenseAxisArray; dims) was called (#3338)
    • Fixed support for dependent sets in the @variable macro (#3344)
    • Fixed a performance bug in constraints with sparse symmetric matrices (#3349)

    Other

    • Improved the printing of complex numbers (#3332)
    • When printing, sets which contain constants ending in .0 now print as integers. This follows the behavior of constants in functions (#3341)
    • Added InfiniteOpt to the extensions documentation (#3343)
    • Added more documentation for the exponential cone (#3345) (#3347)
    • Added checklists for developers (#3346) (#3355)
    • Fixed test support upcoming Julia nightly (#3351)
    • Fixed extension-tests.yml action (#3353)
    • Add more solvers to the documentation (#3359) (#3361) (#3362)

    Version 1.11.0 (May 3, 2023)

    Added

    • Added new methods to print_active_bridges for printing a particular objective, constraint, or variable (#3316)

    Fixed

    • Fixed tests for MOI v1.14.0 release (#3312)
    • Fixed indexing containers when an axis is Vector{Any} that contains a Vector{Any} element (#3280)
    • Fixed getindex(::AbstractJuMPScalar) which is called for an expression like x[] (#3314)
    • Fixed bug in set_string_names_on_creation with a vector of variables (#3322)
    • Fixed bug in memoize function in nonlinear documentation (#3337)

    Other

    • Fixed typos in the documentation (#3317) (#3318) (#3328)
    • Added a test for the order of setting start values (#3315)
    • Added READMEs of solvers and extensions to the docs (#3309) (#3320) (#3327) (#3329) (#3333)
    • Style improvements to src/variables.jl (#3324)
    • Clarify that column generation does not find global optimum (#3325)
    • Add a GitHub actions workflow for testing extensions prior to release (#3331)
    • Document the release process for JuMP (#3334)
    • Fix links to discourse and chatroom (#3335)

    Version 1.10.0 (April 3, 2023)

    Added

    • Added Nonnegatives, Nonpositives and Zeros, and support vector-valued inequality syntax in the JuMP macros (#3273)
    • Added special support for LinearAlgebra.Symmetric and LinearAlgebra.Hermitian matrices in Zeros constraints (#3281) (#3296)
    • Added HermitianMatrixSpace and the Hermitian tag for generating a matrix of variables that is Hermitian (#3292) (#3293)
    • Added Semicontinuous and Semiinteger (#3302)
    • Added support for keyword indexing of containers (#3237)

    Fixed

    • Fixed [compat] bound for MathOptInterface in Project.toml (#3272)

    Other

    • Split out the Nested optimization problems tutorial (#3274)
    • Updated doctests to ensure none have hidden state (#3275) (#3276)
    • Clarified how lazy constraints may revisit points (#3278)
    • Added P-Norm example (#3282)
    • Clarified docs that macros create new bindings (#3284)
    • Fixed threading example (#3283)
    • Added plot to The minimum distortion problem (#3288)
    • Added Google style rules for Vale and fixed warnings (#3285)
    • Added citation for the JuMP 1.0 paper (#3294)
    • Updated package versions in the documentation (#3298)
    • Added comment for the order in which start values must be set (#3303)
    • Improved error message for unrecognized constraint operators (#3311)

    Version 1.9.0 (March 7, 2023)

    Added

    Fixed

    • The matrix returned by a variable in HermitianPSDCone is now a LinearAlgebra.Hermitian matrix. This is potentially breaking if you have written code to assume the return is a Matrix. (#3245) (#3246)
    • Fixed missing support for Base.isreal of expressions (#3252)

    Other

    • Fixed a thread safety issue in the Parallelism tutorial (#3240) (#3243)
    • Improved the error message when unsupported operators are used in @NL macros (#3236)
    • Clarified the documentation to say that matrices in HermitianPSDCone must be LinearAlgebra.Hermitian (#3241)
    • Minor style fixes to internal macro code (#3247)
    • Add Example: quantum state discrimination tutorial (#3250)
    • Improve error message when begin...end not passed to plural macros (#3255)
    • Document how to register function with varying number of input arguments (#3258)
    • Tidy tests by removing unneeded JuMP. prefixes (#3260)
    • Clarified the introduction to the Complex number support tutorial (#3262)
    • Fixed typos in the Documentation (#3263) (#3266) (#3268) (#3269)

    Version 1.8.2 (February 27, 2023)

    Fixed

    • Fixed dot product between complex JuMP expression and number (#3244)

    Other

    • Polish simple SDP examples (#3232)

    Version 1.8.1 (February 23, 2023)

    Fixed

    • Fixed support for init in nonlinear generator expressions (#3226)

    Other

    • Use and document import MathOptInterface as MOI (#3222)
    • Removed references in documentation to multiobjective optimization being unsupported (#3223)
    • Added tutorial on multi-objective portfolio optimization (#3227)
    • Refactored some of the conic tutorials (#3229)
    • Fixed typos in the documentation (#3230)
    • Added tutorial on parallelism (#3231)

    Version 1.8.0 (February 16, 2023)

    Added

    • Added --> syntax support for indicator constraints. The old syntax of => remains supported (#3207)
    • Added <--> syntax for reified constraints. For now, few solvers support reified constraints (#3206)
    • Added fix_discrete_variables. This is most useful for computing the dual of a mixed-integer program (#3208)
    • Added support for vector-valued objectives. For details, see the Multi-objective knapsack tutorial (#3176)

    Fixed

    • Fixed a bug in lp_sensitivity_report by switching to an explicit LU factorization of the basis matrix (#3182)
    • Fixed a bug that prevented [; kwarg] arguments in macros (#3220)

    Other

    Version 1.7.0 (January 25, 2023)

    Added

    • Added support for view of a Containers.DenseAxisArray (#3152) (#3180)
    • Added support for containers of variables in ComplexPlane (#3184)
    • Added support for minimum and maximum generators in nonlinear expressions (#3189)
    • Added SnoopPrecompile statements that reduce the time-to-first-solve in Julia 1.9 (#3193) (#3195) (#3196) (#3197)

    Other

    • Large refactoring of the tests (#3166) (#3167) (#3168) (#3169) (#3170) (#3171)
    • Remove unreachable code due to VERSION checks (#3172)
    • Document how to test JuMP extensions (#3174)
    • Fix method ambiguities in Containers (#3173)
    • Improve error message that is thrown when = is used instead of == in the @constraint macro (#3178)
    • Improve the error message when Bool is used instead of Bin in the @variable macro (#3180)
    • Update versions of the documentation (#3185)
    • Tidy the import of packages and remove unnecessary prefixes (#3186) (#3187)
    • Refactor src/JuMP.jl by moving methods into more relevant files (#3188)
    • Fix docstring of Model not appearing in the documentation (#3198)

    Version 1.6.0 (January 1, 2023)

    Added

    Fixed

    • Fixed promotion of complex expressions (#3150) (#3164)

    Other

    • Added Benders tutorial with in-place resolves (#3145)
    • Added more Tips and tricks for linear programs (#3144) (#3163)
    • Clarified documentation that start can depend on the indices of a variable container (#3148)
    • Replace instances of length and size by the recommended eachindex and axes (#3149)
    • Added a warning explaining why the model is dirty when accessing solution results from a modified model (#3156)
    • Clarify documentation that PSD ensures a symmetric matrix (#3159)
    • Maintenance of the JuMP test suite (#3146) (#3158) (#3162)

    Version 1.5.0 (December 8, 2022)

    Added

    Fixed

    • Fixed error message for vectorized interval constraints (#3123)
    • Fixed passing AbstractString to set_optimizer_attribute (#3127)

    Other

    • Update package versions used in docs (#3119) (#3133) (#3139)
    • Fixed output of diet tutorial (#3120)
    • Explain how to use Dates.Period in set_time_limit_sec (#3121)
    • Update to JuliaFormatter v1.0.15 (#3130)
    • Fixed HTTP server example in web_app.jl (#3131)
    • Update docs to build with Documenter#master (#3094)
    • Add tests for LinearAlgebra operations (#3132)
    • Tidy these release notes (#3135)
    • Added documentation for Complex number support (#3141)
    • Removed the "workforce scheduling" and "steelT3" tutorials (#3143)

    Version 1.4.0 (October 29, 2022)

    Added

    Fixed

    • Fixed a bug in copy_to(dest::Model, src::MOI.ModelLike) when src has nonlinear components (#3101)
    • Fixed the printing of (-1.0 + 0.0im) coefficients in complex expressions (#3112)
    • Fixed a parsing bug in nonlinear expressions with generator statements that contain multiple for statements (#3116)

    Other

    • Converted the multi-commodity flow tutorial to use an SQLite database (#3098)
    • Fixed a number of typos in the documentation (#3103) (#3107) (#3018)
    • Improved various style aspects of the PDF documentation (#3095) (#3098) (#3102)

    Version 1.3.1 (September 28, 2022)

    Fixed

    • Fixed a performance issue in relax_integrality (#3087)
    • Fixed the type stability of operators with Complex arguments (#3072)
    • Fixed a bug which added additional +() terms to some nonlinear expressions (#3091)
    • Fixed potential method ambiguities with AffExpr and QuadExpr objects (#3092)

    Other

    • Added vale as a linter for the documentation (#3080)
    • Added a tutorial on debugging JuMP models (#3043)
    • Fixed a number of typos in the documentation (#3079) (#3083)
    • Many other small tweaks to the documentation (#3068) (#3073) (#3074) (#3075) (#3076) (#3077) (#3078) (#3081) (#3082) (#3084) (#3085) (#3089)

    Version 1.3.0 (September 5, 2022)

    Added

    • Support slicing in SparseAxisArray (#3031)

    Fixed

    • Fixed a bug introduced in v1.2.0 that prevented DenseAxisArrays with Vector keys (#3064)

    Other

    • Released the JuMP logos under the CC BY 4.0 license (#3063)
    • Minor tweaks to the documentation (#3054) (#3056) (#3057) (#3060) (#3061) (#3065)
    • Improved code coverage of a number of files (#3048) (#3049) (#3050) (#3051) (#3052) (#3053) (#3058) (#3059)

    Version 1.2.1 (August 22, 2022)

    Fixed

    • Fixed a bug when parsing two-sided nonlinear constraints (#3045)

    Version 1.2.0 (August 16, 2022)

    Breaking

    This is a large minor release because it significantly refactors the internal code for handling nonlinear programs to use the MathOptInterface.Nonlinear submodule that was introduced in MathOptInterface v1.3.0. As a consequence, the internal data structure in model.nlp_data has been removed, as has the JuMP._Derivatives submodule. Despite the changes, the public API for nonlinear programming has not changed, and any code that uses only the public API and that worked with v1.1.1 will continue to work with v1.2.0.

    Added

    • Added all_constraints(model; include_variable_in_set_constraints), which simplifies returning a list of all constraint indices in the model. See the sketch after this list.
    • Added the ability to delete nonlinear constraints via delete(::Model, ::NonlinearConstraintRef).
    • Added the ability to provide an explicit Hessian for a multivariate user-defined function.
    • Added support for querying the primal value of a nonlinear constraint via value(::NonlinearConstraintRef)
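
    A minimal sketch of these additions (names are illustrative):

      using JuMP
      model = Model()
      @variable(model, x >= 1)
      @NLconstraint(model, c, x^3 <= 8)
      # List constraint references, optionally excluding variable-in-set
      # constraints such as the lower bound on x.
      cons = all_constraints(model; include_variable_in_set_constraints = false)
      # Nonlinear constraints can now be deleted. After optimize!, the primal
      # value of a nonlinear constraint can be queried with value(c).
      delete(model, c)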

    Fixed

    • Fixed a bug in Containers.DenseAxisArray so that it now supports indexing with keys that hash to the same value, even if they are different types, for example, Int32 and Int64.
    • Fixed a bug printing the model when the solver does not support MOI.Name.

    Other

    • Added a constraint programming formulation to the Sudoku tutorial.
    • Added newly supported solvers Pajarito, Clarabel, and COPT to the installation table.
    • Fixed a variety of other miscellaneous issues in the documentation.

    Version 1.1.1 (June 14, 2022)

    Other

    • Fixed problem displaying LaTeX in the documentation
    • Minor updates to the style guide
    • Updated to MOI v1.4.0 in the documentation

    Version 1.1.0 (May 25, 2022)

    Added

    • Added num_constraints(::Model; count_variable_in_set_constraints) to simplify the process of counting the number of constraints in a model (see the sketch after this list)
    • Added VariableRef(::ConstraintRef) for querying the variable associated with a bound or integrality constraint.
    • Added set_normalized_coefficients for modifying the variable coefficients of a vector-valued constraint.
    • Added set_string_names_on_creation to disable creating String names for variables and constraints. This can improve performance.
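
    A minimal sketch of the new utilities (current JuMP syntax):

      using JuMP
      model = Model()
      # Skip creating String names for new variables and constraints, which can
      # improve performance on large models.
      set_string_names_on_creation(model, false)
      @variable(model, x >= 0, Int)
      # Count constraints, optionally excluding variable-in-set constraints such
      # as the bound and integrality constraints on x.
      n = num_constraints(model; count_variable_in_set_constraints = false)
      # Recover the variable behind an integrality constraint.
      c = IntegerRef(x)
      v = VariableRef(c)  # the same variable as x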

    Fixed

    • Fixed a bug passing nothing to the start keyword of @variable

    Other

    • New tutorials:
      • Sensitivity analysis of a linear program
      • Serving web apps
    • Minimal ellipse SDP tutorial refactored and improved
    • Docs updated to the latest version of each package
    • Lots of minor fixes and improvements to the documentation

    Version 1.0.0 (March 24, 2022)

    Read more about this release, along with an acknowledgement of all the contributors, in our JuMP 1.0.0 is released blog post.

    Breaking

    • The previously deprecated functions (v0.23.0, v0.23.1) have been removed. The deprecations were made to improve the consistency of function names:
      • num_nl_constraints (see num_nonlinear_constraints)
      • all_nl_constraints (see all_nonlinear_constraints)
      • add_NL_expression (see add_nonlinear_expression)
      • set_NL_objective (see set_nonlinear_objective)
      • add_NL_constraint (see add_nonlinear_constraint)
      • nl_expr_string (see nonlinear_expr_string)
      • nl_constraint_string (see nonlinear_constraint_string)
      • SymMatrixSpace (see SymmetricMatrixSpace)
    • The unintentionally exported variable JuMP.op_hint has been renamed to the unexported JuMP._OP_HINT

    Fixed

    • Fixed a bug writing .nl files
    • Fixed a bug broadcasting SparseAxisArrays

    Version 0.23.2 (March 14, 2022)

    Added

    • Added relative_gap to solution_summary
    • register now throws an informative error if the function is not differentiable using ForwardDiff. In some cases, the check in register will encounter a false negative, and the informative error will be thrown at run-time. This usually happens when the function is non-differentiable in a subset of the domain.
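
    For example, a sketch using the legacy nonlinear API (the function f is illustrative):

      using JuMP
      model = Model()
      @variable(model, x)
      f(a) = exp(-a) + a^2
      # register checks that f can be differentiated with ForwardDiff and throws
      # an informative error if the check fails.
      register(model, :f, 1, f; autodiff = true)
      @NLobjective(model, Min, f(x))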

    Fixed

    • Fixed a scoping issue when extending the container keyword of containers

    Other

    • Docs updated to the latest version of each package

    Version 0.23.1 (March 2, 2022)

    Deprecated

    • nl_expr_string and nl_constraint_string have been renamed to nonlinear_expr_string and nonlinear_constraint_string. The old methods still exist with deprecation warnings. This change should impact very few users because to call them you must rely on private internals of the nonlinear API. Users are encouraged to use sprint(show, x) instead, where x is the nonlinear expression or constraint of interest.

    Added

    • Added support for Base.abs2(x) where x is a variable or affine expression. This is mainly useful for complex-valued constraints.
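
    For example, abs2 can now appear directly in an objective or constraint (a minimal sketch with a real variable; the complex case is analogous):

      using JuMP
      model = Model()
      @variable(model, x)
      # abs2 of an affine expression returns a quadratic expression.
      @objective(model, Min, abs2(x - 3))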

    Fixed

    • Fixed addition of complex and real affine expressions
    • Fixed arithmetic for Complex-valued quadratic expressions
    • Fixed variable bounds passed as Rational{Int}(Inf)
    • Fixed printing of the coefficient (0 + 1im)
    • Fixed a bug when solution_summary is called prior to optimize!

    Version 0.23.0 (February 25, 2022)

    JuMP v0.23.0 is a breaking release. It is also a release-candidate for JuMP v1.0.0. That is, if no issues are found with the v0.23.0 release, then it will be re-tagged as v1.0.0.

    Breaking

    • Julia 1.6 is now the minimum supported version
    • MathOptInterface has been updated to v1.0.0
    • All previously deprecated functionality has been removed
    • PrintMode, REPLMode and IJuliaMode have been removed in favor of the MIME types MIME"text/plain" and MIME"text/latex". Replace instances of ::Type{REPLMode} with ::MIME"text/plain", REPLMode with MIME("text/plain"), ::Type{IJuliaMode} with ::MIME"text/latex", and IJuliaMode with MIME("text/latex").
    • Functions containing the nl_ acronym have been renamed to the more explicit nonlinear_. For example, num_nl_constraints is now num_nonlinear_constraints and set_NL_objective is now set_nonlinear_objective. Calls to the old functions throw an error explaining the new name.
    • SymMatrixSpace has been renamed to SymmetricMatrixSpace

    Added

    • Added nonlinear_dual_start_value and set_nonlinear_dual_start_value
    • Added preliminary support for Complex coefficient types

    Fixed

    • Fixed a bug in solution_summary

    Other

    • MILP examples have been migrated from GLPK to HiGHS
    • Fixed various typos
    • Improved section on setting constraint start values

    Troubleshooting problems when updating

    If you experience problems when updating, you are likely using previously deprecated functionality. (By default, Julia does not warn when you use deprecated features.)

    To find the deprecated features you are using, start Julia with --depwarn=yes:

    $ julia --depwarn=yes

    Then install JuMP v0.22.3:

    julia> using Pkg
    julia> pkg"add JuMP@0.22.3"

    And then run your code. Apply any suggestions, or search the release notes below for advice on updating a specific deprecated feature.

    Version 0.22.3 (February 10, 2022)

    Fixed

    • Fixed a reproducibility issue in the TSP tutorial
    • Fixed a reproducibility issue in the max_cut_sdp tutorial
    • Fixed a bug broadcasting an empty SparseAxisArray

    Other

    • Added a warning and improved documentation for the modify-then-query case
    • Fixed a typo in the docstring of RotatedSecondOrderCone
    • Added Aqua.jl as a check for code health
    • Added introductions to each section of the tutorials
    • Improved the column generation and Benders decomposition tutorials
    • Updated documentation to MOI v0.10.8
    • Updated JuliaFormatter to v0.22.2

    Version 0.22.2 (January 10, 2022)

    Added

    • The function all_nl_constraints now returns all nonlinear constraints in a model
    • start_value and set_start_value can now be used to get and set the primal start for constraint references
    • Plural macros now return a tuple containing the elements that were defined instead of nothing
    • Anonymous variables are now printed as _[i] where i is the index of the variable instead of noname. Calling name(x) still returns "" so this is non-breaking.
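
    A small sketch of the plural-macro return value and constraint primal starts described above:

      using JuMP
      model = Model()
      # Plural macros now return a tuple of the objects they define.
      x, y = @variables(model, begin
          x >= 0
          y >= 0
      end)
      @constraint(model, c, x + y <= 1)
      # Constraint primal starts can now be set and queried.
      set_start_value(c, 1.0)
      start_value(c)  # 1.0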

    Fixed

    • Fixed handling of min and max in nonlinear expressions
    • CartesianIndex is no longer allowed as a key for DenseAxisArrays.

    Other

    • Improved the performance of GenericAffExpr
    • Added a tutorial on the Travelling Salesperson Problem
    • Added a tutorial on querying the Hessian of a nonlinear program
    • Added documentation on using custom solver binaries.

    Version 0.22.1 (November 29, 2021)

    Added

    • Export OptimizationSense enum, with instances: MIN_SENSE, MAX_SENSE, and FEASIBILITY_SENSE
    • Add Base.isempty(::Model) to match Base.empty(::Model)

    Fixed

    • Fix bug in container with tuples as indices
    • Fix bug in set_time_limit_sec

    Other

    • Add tutorial "Design patterns for larger models"
    • Remove release notes section from PDF
    • General edits of the documentation and error messages

    Version 0.22.0 (November 10, 2021)

    JuMP v0.22 is a breaking release

    Breaking

    JuMP 0.22 contains a number of breaking changes. However, these should be invisible for the majority of users. You will mostly encounter these breaking changes if you: wrote a JuMP extension, accessed backend(model), or called @SDconstraint.

    The breaking changes are as follows:

    • MathOptInterface has been updated to v0.10.4. For users who have interacted with the MOI backend, this contains a large number of breaking changes. Read the MathOptInterface release notes for more details.
    • The bridge_constraints keyword argument to Model and set_optimizer has been renamed add_bridges to reflect that more things than just constraints were bridged.
    • The backend(model) field now contains a concrete instance of a MOI.Utilities.CachingOptimizer instead of one with an abstractly typed optimizer field. In most cases, this will lead to improved performance. However, calling set_optimizer after backend invalidates the old backend. For example:
      model = Model()
      b = backend(model)
      set_optimizer(model, GLPK.Optimizer)
      @variable(model, x)
      # b is not updated with `x`! Get a new b by calling `backend` again.
      new_b = backend(model)
    • All usages of @SDconstraint are deprecated. The new syntax is @constraint(model, X >= Y, PSDCone()).
    • Creating a DenseAxisArray with a Number as an axis will now display a warning. This catches a common error in which users write @variable(model, x[length(S)]) instead of @variable(model, x[1:length(S)]).
    • The caching_mode argument to Model, for example, Model(caching_mode = MOIU.MANUAL), has been removed. For more control over the optimizer, use direct_model instead.
    • The previously deprecated lp_objective_perturbation_range and lp_rhs_perturbation_range functions have been removed. Use lp_sensitivity_report instead.
    • The .m fields of NonlinearExpression and NonlinearParameter have been renamed to .model.
    • Infinite variable bounds are now ignored. Thus, @variable(model, x <= Inf) will show has_upper_bound(x) == false. Previously, these bounds were passed through to the solvers which caused numerical issues for solvers expecting finite bounds.
    • The variable_type and constraint_type functions were removed. This should only affect users who previously wrote JuMP extensions. The functions can be deleted without consequence.
    • The internal functions moi_mode, moi_bridge_constraints, moi_add_constraint, and moi_add_to_function_constant are no longer exported.
    • The un-used method Containers.generate_container has been deleted.
    • The Containers API has been refactored, and _build_ref_sets is now public as Containers.build_ref_sets.
    • The parse_constraint_ methods for extending @constraint at parse time have been refactored in a breaking way. Consult the Extensions documentation for more details and examples.

    Added

    • The TerminationStatusCode and ResultStatusCode enums are now exported by JuMP. Prefer termination_status(model) == OPTIMAL instead of == MOI.OPTIMAL, although using the MOI. prefix still works.
    • Copy an x::DenseAxisArray to an Array by calling Array(x).
    • NonlinearExpression is now a subtype of AbstractJuMPScalar
    • Constraints such as @constraint(model, x + 1 in MOI.Integer()) are now supported.
    • primal_feasibility_report now accepts a function as the first argument.
    • The scalar variable syntax @variable(model, x[1:2] in MOI.Integer()) creates two variables, both of which are constrained to be in the set MOI.Integer.
    • Conic constraints can now be specified as inequalities under a different partial ordering. So @constraint(model, x - y in MOI.Nonnegatives()) can now be written as @constraint(model, x >= y, MOI.Nonnegatives()).
    • Names are now set for vectorized constraints.

    Fixed

    • Fixed a performance issue when show was called on a SparseAxisArray with a large number of elements.
    • Fixed a bug displaying barrier and simplex iterations in solution_summary.
    • Fixed a bug by implementing hash for DenseAxisArray and SparseAxisArray.
    • Names are now only set if the solver supports them. Previously, this prevented solvers such as Ipopt from being used with direct_model.
    • MutableArithmetics.Zero is converted into a 0.0 before being returned to the user. Previously, some calls to @expression would return the undocumented MutableArithmetics.Zero() object. One example is summing over an empty set @expression(model, sum(x[i] for i in 1:0)). You will now get 0.0 instead.
    • AffExpr and QuadExpr can now be used with == 0 instead of iszero. This fixes a number of issues relating to Julia standard libraries such as LinearAlgebra and SparseArrays.
    • Fixed a bug when registering a user-defined function with splatting.

    Other

    • The documentation is now available as a PDF.
    • The documentation now includes a full copy of the MathOptInterface documentation to make it easy to link concepts between the docs. (The MathOptInterface documentation has also been significantly improved.)
    • The documentation contains a large number of improvements and clarifications on a range of topics. Thanks to @sshin23, @DilumAluthge, and @jlwether.
    • The documentation is now built with Julia 1.6 instead of 1.0.
    • Various error messages have been improved to be more readable.

    Version 0.21.10 (September 4, 2021)

    Added

    • Added add_NL_expression
    • add_NL_xxx functions now support AffExpr and QuadExpr as terms

    Fixed

    • Fixed a bug in solution_summary
    • Fixed a bug in relax_integrality

    Other

    • Improved error message in lp_sensitivity_report

    Version 0.21.9 (August 1, 2021)

    Added

    • Containers now support arbitrary container types by passing the type to the container keyword and overloading Containers.container.
    • is_valid now supports nonlinear constraints
    • Added unsafe_backend for querying the inner-most optimizer of a JuMP model.
    • Nonlinear parameters now support the plural @NLparameters macro.
    • Containers (for example, DenseAxisArray) can now be used in vector-valued constraints.

    Other

    • Various improvements to the documentation.

    Version 0.21.8 (May 8, 2021)

    Added

    • The @constraint macro is now extendable in the same way as @variable.
    • AffExpr and QuadExpr can now be used in nonlinear macros.

    Fixed

    • Fixed a bug in lp_sensitivity_report.
    • Fixed an inference issue when creating empty SparseAxisArrays.

    Version 0.21.7 (April 12, 2021)

    Added

    • Added primal_feasibility_report, which can be used to check whether a primal point satisfies primal feasibility. See the sketch after this list.
    • Added coefficient, which returns the coefficient associated with a variable in affine and quadratic expressions.
    • Added copy_conflict, which returns the IIS of an infeasible model.
    • Added solution_summary, which returns (and prints) a struct containing a summary of the solution.
    • Allow AbstractVector in vector constraints instead of just Vector.
    • Added latex_formulation(model) which returns an object representing the latex formulation of a model. Use print(latex_formulation(model)) to print the formulation as a string.
    • User-defined functions in nonlinear expressions are now automatically registered to aid quick model prototyping. However, a warning is printed to encourage the manual registration.
    • DenseAxisArray's now support broadcasting over multiple arrays.
    • Container indices can now be iterators of Base.SizeUnknown.
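
    A minimal sketch of primal_feasibility_report and latex_formulation (the candidate point is illustrative):

      using JuMP
      model = Model()
      @variable(model, x >= 0)
      @constraint(model, c, 2x <= 1)
      # Check a candidate point without calling a solver; the result maps each
      # violated constraint to the magnitude of its violation.
      report = primal_feasibility_report(model, Dict(x => 1.0))
      # Print the model as LaTeX.
      print(latex_formulation(model))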

    Fixed

    • Fixed bug in rad2deg and deg2rad in nonlinear expressions.
    • Fixed a MethodError bug in Containers when forcing container type.
    • Allow partial slicing of a DenseAxisArray, resolving an issue from 2014.
    • Fixed a bug printing variable names in IJulia.
    • Ending an IJulia cell with model now prints a summary of the model (like in the REPL) not the latex formulation. Use print(model) to print the latex formulation.
    • Fixed a bug when copying models containing nested arrays.

    Other

    • Tutorials are now part of the documentation, and more refactoring has taken place.
    • Added JuliaFormatter as a code formatter.
    • Added some precompilation statements to reduce initial latency.
    • Various improvements to error messages to make them more helpful.
    • Improved performance of value(::NonlinearExpression).
    • Improved performance of fix(::VariableRef).

    Version 0.21.6 (January 29, 2021)

    Added

    • Added support for skew-symmetric variables via @variable(model, X[1:2, 1:2] in SkewSymmetricMatrixSpace()). See the sketch after this list.
    • lp_sensitivity_report has been added which significantly improves the performance of querying the sensitivity summary of an LP. lp_objective_perturbation_range and lp_rhs_perturbation_range are deprecated.
    • Dual warm-starts are now supported with set_dual_start_value and dual_start_value.
    • ∈ (\in<tab>) can now be used in macros instead of = or in.
    • Use haskey(model::Model, key::Symbol) to check if a name key is registered in a model.
    • Added unregister(model::Model, key::Symbol) to unregister a name key from model.
    • Added callback_node_status for use in callbacks.
    • Added print_bridge_graph to visualize the bridging graph generated by MathOptInterface.
    • Improved error message for containers with duplicate indices.
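
    A small sketch of the skew-symmetric variables and the name-registration helpers listed above:

      using JuMP
      model = Model()
      # X is constrained so that X[i, j] == -X[j, i].
      @variable(model, X[1:2, 1:2] in SkewSymmetricMatrixSpace())
      haskey(model, :X)      # true
      unregister(model, :X)  # frees the name :X for reuse
      haskey(model, :X)      # false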

    Fixed

    • Various fixes to pass tests on Julia 1.6.
    • Fixed a bug in the printing of nonlinear expressions in IJulia.
    • Fixed a bug when nonlinear expressions are passed to user-defined functions.
    • Some internal functions that were previously exported are now no longer exported.
    • Fixed a bug when relaxing a fixed binary variable.
    • Fixed a StackOverflowError that occurred when SparseAxisArrays had a large number of elements.
    • Removed an unnecessary type assertion in list_of_constraint_types.
    • Fixed a bug when copying models with registered expressions.

    Other

    • The documentation has been significantly overhauled. It now has distinct sections for the manual, API reference, and examples. The existing examples in /examples have now been moved to /docs/src/examples and rewritten using Literate.jl, and they are now included in the documentation.
    • JuliaFormatter has been applied to most of the codebase. This will continue to roll out over time, as we fix upstream issues in the formatter, and will eventually become compulsory.
    • The root cause of a large number of method invalidations has been resolved.
    • We switched continuous integration from Travis and Appveyor to GitHub Actions.

    Version 0.21.5 (September 18, 2020)

    Fixed

    • Fixed deprecation warnings
    • Throw DimensionMismatch for incompatibly sized functions and sets
    • Unify treatment of keys(x) on JuMP containers

    Version 0.21.4 (September 14, 2020)

    Added

    • Add debug info when adding unsupported constraints
    • Add relax_integrality for solving continuous relaxation
    • Allow querying constraint conflicts

    Fixed

    • Dispatch on Real for MOI.submit
    • Implement copy for CustomSet in tests
    • Don't export private macros
    • Fix invalid assertion in nonlinear
    • Error if constraint has NaN right-hand side
    • Improve speed of tests
    • Lots of work modularizing files in /test
    • Improve line numbers in macro error messages
    • Print nonlinear subexpressions
    • Various documentation updates
    • Dependency updates:
      • DataStructures 0.18
      • MathOptFormat v0.5
      • Prep for MathOptInterface 0.9.15

    Version 0.21.3 (June 18, 2020)

    • Added Special Ordered Sets (SOS1 and SOS2) to JuMP with default weights to ease the creation of such constraints (#2212). See the sketch at the end of this list.
    • Added functions simplex_iterations, barrier_iterations and node_count (#2201).
    • Added function reduced_cost (#2205).
    • Implemented callback_value for affine and quadratic expressions (#2231).
    • Support MutableArithmetics.Zero in objective and constraints (#2219).
    • Documentation improvements:
      • Mention tutorials in the docs (#2223).
      • Update COIN-OR links (#2242).
      • Explicit link to the documentation of MOI.FileFormats (#2253).
      • Typo fixes (#2261).
    • Containers improvements:
      • Fix Base.map for DenseAxisArray (#2235).
      • Throw BoundsError if number of indices is incorrect for DenseAxisArray and SparseAxisArray (#2240).
    • Extensibility improvements:
      • Implement a set_objective method fallback that redirects to set_objective_sense and set_objective_function (#2247).
      • Add parse_constraint method with arbitrary number of arguments (#2051).
      • Add parse_constraint_expr and parse_constraint_head (#2228).
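
    A minimal sketch of the new SOS constraints (#2212):

      using JuMP
      model = Model()
      @variable(model, x[1:3] >= 0)
      # SOS1: at most one element of x may be nonzero; default weights are used.
      @constraint(model, x in SOS1())
      # SOS2 with explicit weights: at most two adjacent elements may be nonzero.
      @constraint(model, x in SOS2([1.0, 2.0, 3.0]))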

    Version 0.21.2 (April 2, 2020)

    • Added relative_gap() to access MOI.RelativeGap() attribute (#2199).
    • Documentation fixes:
      • Added link to source for docstrings in the documentation (#2207).
      • Added docstring for @variables macro (#2216).
      • Typo fixes (#2177, #2184, #2182).
    • Implementation of methods for Base functions:
      • Implemented Base.empty! for JuMP.Model (#2198).
      • Implemented Base.conj for JuMP scalar types (#2209).

    Fixed

    • Fixed sum of expression with scalar product in macro (#2178).
    • Fixed writing of nonlinear models to MathOptFormat (#2181).
    • Fixed construction of empty SparseAxisArray (#2179).
    • Fixed constraint with zero function (#2188).

    Version 0.21.1 (Feb 18, 2020)

    • Improved the clarity of the with_optimizer deprecation warning.

    Version 0.21.0 (Feb 16, 2020)

    Breaking

    • Deprecated with_optimizer (#2090, #2084, #2141). You can replace with_optimizer by either nothing, optimizer_with_attributes or a closure:

      • replace with_optimizer(Ipopt.Optimizer) by Ipopt.Optimizer.
      • replace with_optimizer(Ipopt.Optimizer, max_cpu_time=60.0) by optimizer_with_attributes(Ipopt.Optimizer, "max_cpu_time" => 60.0).
      • replace with_optimizer(Gurobi.Optimizer, env) by () -> Gurobi.Optimizer(env).
      • replace with_optimizer(Gurobi.Optimizer, env, Presolve=0) by optimizer_with_attributes(() -> Gurobi.Optimizer(env), "Presolve" => 0).

      As an alternative to optimizer_with_attributes, you can also set the attributes separately with set_optimizer_attribute.

    • Renamed set_parameter and set_parameters to set_optimizer_attribute and set_optimizer_attributes (#2150).

    • Broadcast should now be explicit inside macros. @SDconstraint(model, x >= 1) and @constraint(model, x + 1 in SecondOrderCone()) now throw an error instead of broadcasting 1 along the dimension of x (#2107).

    • @SDconstraint(model, x >= 0) is now equivalent to @constraint(model, x in PSDCone()) instead of @constraint(model, (x .- 0) in PSDCone()) (#2107).

    • The macros now create the containers with map instead of for loops. As a consequence, containers created by @expression can now have any element type, and containers of constraint references now have concrete element types when possible. This fixes a long-standing issue where @expression could only be used to generate a collection of linear expressions. Now it works for quadratic expressions as well (#2070).

    • Calling deepcopy(::AbstractModel) now throws an error.

    • The constraint name is now printed in the model string (#2108).

    Added

    • Added support for solver-independent and solver-specific callbacks (#2101).
    • Added write_to_file and read_from_file; supported formats are CBF, LP, MathOptFormat, MPS, and SDPA (#2114). See the sketch after this list.
    • Added support for complementarity constraints (#2132).
    • Added support for indicator constraints (#2092).
    • Added support for querying multiple solutions with the result keyword (#2100).
    • Added support for constraining variables on creation (#2128).
    • Added method delete that deletes a vector of variables at once if it is supported by the underlying solver (#2135).
    • The arithmetic between JuMP expressions has been refactored into the MutableArithmetics package (#2107).
    • Improved error on complex values in NLP (#1978).
    • Added an example of column generation (#2010).
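
    A minimal sketch of the file I/O functions (the file name is illustrative):

      using JuMP
      model = Model()
      @variable(model, x >= 0, Int)
      @objective(model, Min, x)
      # The format is inferred from the file extension (for example, .mps, .lp,
      # or .mof.json).
      write_to_file(model, "my_model.mps")
      new_model = read_from_file("my_model.mps")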

    Fixed

    • Incorrect coefficients generated when using Symmetric variables (#2102)

    Version 0.20.1 (Oct 18, 2019)

    • Add sections on @variables and @constraints in the documentation (#2062).
    • Fixed product of sparse matrices for Julia v1.3 (#2063).
    • Added set_objective_coefficient to modify the coefficient of a linear term of the objective function (#2008).
    • Added set_time_limit_sec, unset_time_limit_sec and time_limit_sec to set and query the time limit for the solver in seconds (#2053).
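
    For example, a sketch assuming a solver such as HiGHS is installed (the time limit is an optimizer attribute, so an optimizer must be attached):

      using JuMP, HiGHS
      model = Model(HiGHS.Optimizer)
      set_time_limit_sec(model, 60.0)
      time_limit_sec(model)        # 60.0
      unset_time_limit_sec(model)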

    Version 0.20.0 (Aug 24, 2019)

    • Documentation updates.
    • Numerous bug fixes.
    • Better error messages (#1977, #1978, #1997, #2017).
    • Performance improvements (#1947, #2032).
    • Added LP sensitivity summary functions lp_objective_perturbation_range and lp_rhs_perturbation_range (#1917).
    • Added functions dual_objective_value, raw_status and set_parameter.
    • Added function set_objective_coefficient to modify the coefficient of a linear term of the objective (#2008).
    • Added functions set_normalized_rhs, normalized_rhs, and add_to_function_constant to modify and get the constant part of a constraint (#1935, #1960).
    • Added functions set_normalized_coefficient and normalized_coefficient to modify and get the coefficient of a linear term of a constraint (#1935, #1960).
    • Numerous other improvements in MOI 0.9, see the NEWS.md file of MOI for more details.

    Version 0.19.2 (June 8, 2019)

    • Fix a bug in derivatives that could arise in models with nested nonlinear subexpressions.

    Version 0.19.1 (May 12, 2019)

    • Usability and performance improvements.
    • Bug fixes.

    Version 0.19.0 (February 15, 2019)

    JuMP 0.19 contains significant breaking changes.

    Breaking

    • JuMP's abstraction layer for communicating with solvers changed from MathProgBase (MPB) to MathOptInterface (MOI). MOI addresses many longstanding design issues. (See @mlubin's slides from JuMP-dev 2018.) JuMP 0.19 is compatible only with solvers that have been updated for MOI. See the installation guide for a list of solvers that have and have not yet been updated.

    • Most solvers have been renamed to PackageName.Optimizer. For example, GurobiSolver() is now Gurobi.Optimizer.

    • Solvers are no longer added to a model via Model(solver = XXX(kwargs...)). Instead use Model(with_optimizer(XXX, kwargs...)). For example, Model(with_optimizer(Gurobi.Optimizer, OutputFlag=0)).

    • JuMP containers (for example, the objects returned by @variable) have been redesigned. Containers.SparseAxisArray replaces JuMPDict, JuMPArray was rewritten (inspired by AxisArrays) and renamed Containers.DenseAxisArray, and you can now request a container type with the container= keyword to the macros. See the corresponding documentation for more details.

    • The statuses returned by solvers have changed. See the possible status values here. The MOI statuses are much richer than the MPB statuses and can be used to distinguish between previously indistinguishable cases (for example, did the solver have a feasible solution when it stopped because of the time limit?).

    • Starting values are separate from result values. Use value to query the value of a variable in a solution. Use start_value and set_start_value to get and set an initial starting point provided to the solver. The solutions from previous solves are no longer automatically set as the starting points for the next solve.
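
    For example (a minimal sketch in current JuMP syntax):

      using JuMP
      model = Model()
      @variable(model, x)
      set_start_value(x, 2.0)  # starting point passed to the solver
      start_value(x)           # 2.0
      # After optimize!(model), query the solution with value(x).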

    • The data structures for affine and quadratic expressions AffExpr and QuadExpr have changed. Internally, terms are stored in dictionaries instead of lists. Duplicate coefficients can no longer exist. Accessors and iteration methods have changed.

    • JuMPNLPEvaluator no longer includes the linear and quadratic parts of the model in the evaluation calls. These are now handled separately to allow NLP solvers that support various types of constraints.

    • JuMP solver-independent callbacks have been replaced by solver-specific callbacks. See your favorite solver for more details. (See the note below: No solver-specific callbacks are implemented yet.)

    • The norm() syntax is no longer recognized inside macros. Use the SecondOrderCone() set instead.

    • JuMP no longer performs automatic transformation between special quadratic forms and second-order cone constraints. Support for these constraint classes depends on the solver.

    • The symbols :Min and :Max are no longer used as optimization senses. Instead, JuMP uses the OptimizationSense enum from MathOptInterface. @objective(model, Max, ...), @objective(model, Min, ...), @NLobjective(model, Max, ...), and @NLobjective(model, Min, ...) remain valid, but @objective(m, :Max, ...) is no longer accepted.

    • The sign conventions for duals have changed in some cases for consistency with conic duality (see the documentation). The shadow_price helper method returns duals with signs that match conventional LP interpretations of dual values as sensitivities of the objective value to relaxations of constraints.

    • @constraintref is no longer defined. Instead, create the appropriate container to hold constraint references manually. For example,

      constraints = Dict() # Optionally, specify types for improved performance.
      for i in 1:N
          constraints[i] = @constraint(model, ...)
      end
    • The lowerbound, upperbound, and basename keyword arguments to the @variable macro have been renamed to lower_bound, upper_bound, and base_name, for consistency with JuMP's new style recommendations.

    • We rely on broadcasting syntax to apply accessors to collections of variables, for example, value.(x) instead of getvalue(x) for collections. (Use value(x) when x is a scalar object.)

    Added

    • Splatting (like f(x...)) is recognized in restricted settings in nonlinear expressions.

    • Support for deleting constraints and variables.

    • The documentation has been completely rewritten using docstrings and Documenter.

    • Support for modeling mixed conic and quadratic models (for example, conic models with quadratic objectives and bi-linear matrix inequalities).

    • Significantly improved support for modeling new types of constraints and for extending JuMP's macros.

    • Support for providing dual warm starts.

    • Improved support for accessing solver-specific attributes (for example, the irreducible inconsistent subsystem).

    • Explicit control of whether symmetry-enforcing constraints are added to PSD constraints.

    • Support for modeling exponential cones.

    • Significant improvements in internal code quality and testing.

    • Style and naming guidelines.

    • Direct mode and manual mode provide explicit control over when copies of a model are stored or regenerated. See the corresponding documentation.

    Regressions

    There are known regressions from JuMP 0.18 that will be addressed in a future release (0.19.x or later):

    • Performance regressions in model generation (issue). Please file an issue anyway if you notice a significant performance regression. We have plans to address a number of performance issues, but we might not be aware of all of them.

    • Fast incremental NLP solves are not yet reimplemented (issue).

    • We do not yet have an implementation of solver-specific callbacks.

    • The column generation syntax in @variable has been removed (that is, the objective, coefficients, and inconstraints keyword arguments). Support for column generation will be re-introduced in a future release.

    • The ability to solve the continuous relaxation (that is, via solve(model; relaxation = true)) is not yet reimplemented (issue).

    Version 0.18.5 (December 1, 2018)

    • Support views in some derivative evaluation functions.
    • Improved compatibility with PackageCompiler.

    Version 0.18.4 (October 8, 2018)

    • Fix a bug in model printing on Julia 0.7 and 1.0.

    Version 0.18.3 (October 1, 2018)

    • Add support for Julia v1.0 (Thanks @ExpandingMan)
    • Fix matrix expressions with quadratic functions (#1508)

    Version 0.18.2 (June 10, 2018)

    • Fix a bug in second-order derivatives when expressions are present (#1319)
    • Fix a bug in @constraintref (#1330)

    Version 0.18.1 (April 9, 2018)

    • Fix for nested tuple destructuring (#1193)
    • Preserve internal model when relaxation=true (#1209)
    • Minor bug fixes and updates for examples

    Version 0.18.0 (July 27, 2017)

    • Drop support for Julia 0.5.
    • Update for ForwardDiff 0.5.
    • Minor bug fixes.

    Version 0.17.1 (June 9, 2017)

    • Use of constructconstraint! in @SDconstraint.
    • Minor bug fixes.

    Version 0.17.0 (May 27, 2017)

    • Breaking change: Mixing quadratic and conic constraints is no longer supported.
    • Breaking change: The getvariable and getconstraint functions are replaced by indexing on the corresponding symbol. For instance, to access the variable with name x, one should now write m[:x] instead of getvariable(m, :x) (see the sketch after this list). As a consequence, creating a variable and constraint with the same name now triggers a warning, and accessing one of them afterwards throws an error. This change is breaking only in the latter case.
    • Addition of the getobjectivebound function that mirrors the functionality of the MathProgBase getobjbound function except that it takes into account transformations performed by JuMP.
    • Minor bug fixes.
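
    A short sketch of the symbol-indexing replacement for getvariable, written with current JuMP syntax:

      using JuMP

      model = Model()
      @variable(model, x >= 0)

      # Objects registered by the macros are retrieved by indexing with a Symbol:
      model[:x] === x  # true; previously getvariable(model, :x)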

    The following changes are primarily of interest to developers of JuMP extensions:

    • The new syntax @constraint(model, expr in Cone) creates the constraint ensuring that expr is inside Cone. The Cone argument is passed to constructconstraint! which enables the call to be dispatched to an extension.
    • The @variable macro now calls constructvariable! instead of directly calling the Variable constructor. Extra arguments and keyword arguments passed to @variable are passed to constructvariable! which enables the call to be dispatched to an extension.
    • Refactor the internal function conicdata (used to build the MathProgBase conic model) into smaller sub-functions to make these parts reusable by extensions.

    Version 0.16.2 (March 28, 2017)

    • Minor bug fixes and printing tweaks
    • Address deprecation warnings for Julia 0.6

    Version 0.16.1 (March 7, 2017)

    • Better support for AbstractArray in JuMP (Thanks @tkoolen)
    • Minor bug fixes

    Version 0.16.0 (February 23, 2017)

    • Breaking change: JuMP no longer has a mechanism for selecting solvers by default (the previous mechanism was flawed and incompatible with Julia 0.6). Not specifying a solver before calling solve() will result in an error.
    • Breaking change: User-defined functions are no longer global. The first argument to JuMP.register is now a JuMP Model object within whose scope the function will be registered. Calling JuMP.register without a Model now produces an error.
    • Breaking change: Use the new JuMP.fix method to fix a variable to a value or to update the value to which a variable is fixed. Calling setvalue on a fixed variable now results in an error in order to avoid silent behavior changes. (Thanks @joaquimg)
    • Nonlinear expressions now print out similarly to linear/quadratic expressions (useful for debugging!)
    • New category keyword to @variable. Used for specifying categories of anonymous variables.
    • Compatibility with Julia 0.6-dev.
    • Minor fixes and improvements (Thanks @cossio, @ccoffrin, @blegat)

    Version 0.15.1 (January 31, 2017)

    • Bugfix for @LinearConstraints and friends

    Version 0.15.0 (December 22, 2016)

    • Julia 0.5.0 is the minimum required version for this release.
    • Document support for BARON solver
    • Enable info callbacks in more states than before, for example, for recording solutions. New when argument to addinfocallback (#814, thanks @yeesian)
    • Improved support for anonymous variables. This includes new warnings for potentially confusing use of the traditional non-anonymous syntax:
      • When multiple variables in a model are given the same name
      • When non-symbols are used as names, for example, @variable(m, x[1][1:N])
    • Improvements in iterating over JuMP containers (#836, thanks @IssamT)
    • Support for writing variable names in .lp file output (Thanks @leethargo)
    • Support for querying duals to SDP problems (Thanks @blegat)
    • The comprehension syntax with curly braces sum{}, prod{}, and norm2{} has been deprecated in favor of Julia's native comprehension syntax sum(), prod() and norm() as previously announced. (For early adopters of the new syntax, norm2() was renamed to norm() without deprecation.)
    • Unit tests rewritten to use Base.Test instead of FactCheck
    • Improved support for operations with matrices of JuMP types (Thanks @ExpandingMan)
    • The syntax to halt a solver from inside a callback has changed from throw(CallbackAbort()) to return JuMP.StopTheSolver
    • Minor bug fixes

    Version 0.14.2 (December 12, 2016)

    • Allow singleton anonymous variables (includes bugfix)

    Version 0.14.1 (September 12, 2016)

    • More consistent handling of states in informational callbacks; includes a new when parameter to addinfocallback for specifying in which state an informational callback should be called.

    Version 0.14.0 (August 7, 2016)

    • Compatibility with Julia 0.5 and ForwardDiff 0.2
    • Support for "anonymous" variables, constraints, expressions, and parameters, for example, x = @variable(m, [1:N]) instead of @variable(m, x[1:N])
    • Support for retrieving constraints from a model by name via getconstraint
    • @NLconstraint now returns constraint references (as expected).
    • Support for vectorized expressions within lazy constraints
    • On Julia 0.5, parse new comprehension syntax sum(x[i] for i in 1:N if isodd(i)) instead of sum{ x[i], i in 1:N; isodd(i) }. The old syntax with curly braces will be deprecated in JuMP 0.15.
    • Now possible to provide nonlinear expressions as "raw" Julia Expr objects instead of using JuMP's nonlinear macros. This input format is useful for programmatically generated expressions.
    • s/Mathematical Programming/Mathematical Optimization/
    • Support for local cuts (Thanks to @madanim, Mehdi Madani)
    • Document Xpress interface developed by @joaquimg, Joaquim Dias Garcia
    • Minor bug and deprecation fixes (Thanks @odow, @jrevels)

    Version 0.13.2 (May 16, 2016)

    • Compatibility update for MathProgBase

    Version 0.13.1 (May 3, 2016)

    • Fix broken deprecation for registerNLfunction.

    Version 0.13.0 (April 29, 2016)

    • Most exported methods and macros have been renamed to avoid camelCase. See the list of changes here. There is a 1-1 mapping from the old names to the new, and it is safe to simply replace the names to update existing models.
    • Specify variable lower/upper bounds in @variable using the lowerbound and upperbound keyword arguments.
    • Change name printed for variable using the basename keyword argument to @variable.
    • New @variables macro allows multi-line declaration of groups of variables.
    • A number of solver methods previously available only through MathProgBase are now exposed directly in JuMP. The fix was recorded live.
    • Compatibility fixes with Julia 0.5.
    • The "end" indexing syntax is no longer supported within JuMPArrays that do not use 1-based indexing, until upstream issues are resolved (see here).

    Version 0.12.2 (March 9, 2016)

    • Small fixes for nonlinear optimization

    Version 0.12.1 (March 1, 2016)

    • Fix a regression in slicing for JuMPArrays (when not using 1-based indexing)

    Version 0.12.0 (February 27, 2016)

    • The automatic differentiation functionality has been completely rewritten with a number of user-facing changes:
      • @defExpr and @defNLExpr now take the model as the first argument. The previous one-argument version of @defExpr is deprecated; all expressions should be named. For example, replace @defExpr(2x+y) with @defExpr(jump_model, my_expr, 2x+y).
      • JuMP no longer uses Julia's variable binding rules for efficiently re-solving a sequence of nonlinear models. Instead, we have introduced nonlinear parameters. This is a breaking change, so we have added a warning message when we detect models that may depend on the old behavior.
      • Support for user-defined functions integrated within nonlinear JuMP expressions.
    • Replaced iteration over AffExpr with Number-like scalar iteration; previous iteration behavior is now available via linearterms(::AffExpr).
    • Stopping the solver via throw(CallbackAbort()) from a callback no longer triggers an exception. Instead, solve() returns UserLimit status.
    • getDual() now works for conic problems (Thanks @emreyamangil).

    Version 0.11.3 (February 4, 2016)

    • Bug-fix for problems with quadratic objectives and semidefinite constraints

    Version 0.11.2 (January 14, 2016)

    • Compatibility update for Mosek

    Version 0.11.1 (December 1, 2015)

    • Remove usage of @compat in tests.
    • Fix updating quadratic objectives for nonlinear models.

    Version 0.11.0 (November 30, 2015)

    • Julia 0.4.0 is the minimum required version for this release.
    • Fix for scoping semantics of index variables in sum{}. Index variables no longer leak into the surrounding scope.
    • Addition of the solve(m::Model, relaxation=true) keyword argument to solve the standard continuous relaxation of model m
    • The getConstraintBounds() method allows access to the lower and upper bounds of all constraints in a (nonlinear) model.
    • Update for breaking changes in MathProgBase

    Version 0.10.3 (November 20, 2015)

    • Fix a rare error when parsing quadratic expressions
    • Fix Variable() constructor with default arguments
    • Detect unrecognized keywords in solve()

    Version 0.10.2 (September 28, 2015)

    • Fix for deprecation warnings

    Version 0.10.1 (September 3, 2015)

    • Fixes for ambiguity warnings.
    • Fix for breaking change in precompilation syntax in Julia 0.4-pre

    Version 0.10.0 (August 31, 2015)

    • Support (on Julia 0.4 and later) for conditions in indexing @defVar and @addConstraint constructs, for example, @defVar(m, x[i=1:5,j=1:5; i+j >= 3])
    • Support for vectorized operations on Variables and expressions. See the documentation for details.
    • New getVar() method to access variables in a model by name
    • Support for semidefinite programming.
    • Dual solutions are now available for general nonlinear problems. You may call getDual on a reference object for a nonlinear constraint, and getDual on a variable object for Lagrange multipliers from active bounds.
    • Introduce warnings for two common performance traps: too many calls to getValue() on a collection of variables and use of the + operator in a loop to sum expressions.
    • Second-order cone constraints can be written directly with the norm() and norm2{} syntax.
    • Implement MathProgBase interface for querying Hessian-vector products.
    • Iteration over JuMPContainers is deprecated; instead, use the keys and values functions, and zip(keys(d),values(d)) for the old behavior.
    • @defVar returns Array{Variable,N} when each of the N index sets is of the form 1:nᵢ.
    • Module precompilation: on Julia 0.4 and later, using JuMP is now much faster.

    Version 0.9.3 (August 11, 2015)

    • Fixes for FactCheck testing on julia v0.4.

    Version 0.9.2 (June 27, 2015)

    • Fix bug in @addConstraints.

    Version 0.9.1 (April 25, 2015)

    • Fix for Julia 0.4-dev.
    • Small infrastructure improvements for extensions.

    Version 0.9.0 (April 18, 2015)

    • Comparison operators for constructing constraints (for example, 2x >= 1) have been deprecated. Instead, construct the constraints explicitly in the @addConstraint macro to add them to the model, or in the @LinearConstraint macro to create a stand-alone linear constraint instance.
    • getValue() method implemented to compute the value of a nonlinear subexpression
    • JuMP is now released under the Mozilla Public License version 2.0 (was previously LGPL). MPL is a copyleft license which is less restrictive than LGPL, especially for embedding JuMP within other applications.
    • A number of performance improvements in ReverseDiffSparse for computing derivatives.
    • MathProgBase.getsolvetime(m) now returns the solution time reported by the solver, if available. (Thanks @odow, Oscar Dowson)
    • Formatting fix for LP format output. (Thanks @sbebo, Leonardo Taccari).

    Version 0.8.0 (February 17, 2015)

    • Nonlinear subexpressions now supported with the @defNLExpr macro.
    • SCS supported for solving second-order conic problems.
    • setXXXCallback family deprecated in favor of addXXXCallback.
    • Multiple callbacks of the same type can be registered.
    • Added support for informational callbacks via addInfoCallback.
    • A CallbackAbort exception can be thrown from callback to safely exit optimization.

    Version 0.7.4 (February 4, 2015)

    • Reduced costs and linear constraint duals are now accessible when quadratic constraints are present.
    • Two-sided nonlinear constraints are supported.
    • Methods for accessing the number of variables and constraints in a model are renamed.
    • New default procedure for setting initial values in nonlinear optimization: project zero onto the variable bounds.
    • Small bug fixes.

    Version 0.7.3 (January 14, 2015)

    • Fix a method ambiguity conflict with Compose.jl (cosmetic fix)

    Version 0.7.2 (January 9, 2015)

    • Fix a bug in sum(::JuMPDict)
    • Added the setCategory function to change a variable's category (for example, continuous or binary) after construction, and getCategory to retrieve the variable category.

    Version 0.7.1 (January 2, 2015)

    • Fix a bug in parsing linear expressions in macros. Affects only Julia 0.4 and later.

    Version 0.7.0 (December 29, 2014)

    Linear/quadratic/conic programming

    • Breaking change: The syntax for column-wise model generation has been changed to use keyword arguments in @defVar.
    • On Julia 0.4 and later, variables and coefficients may be multiplied in any order within macros. That is, variable*coefficient is now valid syntax.
    • ECOS supported for solving second-order conic problems.

    Nonlinear programming

    • Support for skipping model generation when solving a sequence of nonlinear models with changing data.
    • Fix a memory leak when solving a sequence of nonlinear models.
    • The @addNLConstraint macro now supports the three-argument version to define sets of nonlinear constraints.
    • KNITRO supported as a nonlinear solver.
    • Speed improvements for model generation.
    • The @addNLConstraints macro supports adding multiple (groups of) constraints at once. Syntax is similar to @addConstraints.
    • Discrete variables allowed in nonlinear problems for solvers which support them (currently only KNITRO).

    General

    • Starting values for variables may now be specified with @defVar(m, x, start=value).
    • The setSolver function allows users to change the solver subsequent to model creation.
    • Support for "fixed" variables via the @defVar(m, x == 1) syntax.
    • Unit tests rewritten to use FactCheck.jl, improved testing across solvers.

    Version 0.6.3 (October 19, 2014)

    • Fix a bug in multiplying two AffExpr objects.

    Version 0.6.2 (October 11, 2014)

    • Further improvements and bug fixes for printing.
    • Fixed a bug in @defExpr.
    • Support for accessing expression graphs through the MathProgBase NLP interface.

    Version 0.6.1 (September 19, 2014)

    • Improvements and bug fixes for printing.

    Version 0.6.0 (September 9, 2014)

    • Julia 0.3.0 is the minimum required version for this release.
    • buildInternalModel(m::Model) added to build solver-level model in memory without optimizing.
    • Deprecate load_model_only keyword argument to solve.
    • Add groups of constraints with @addConstraints macro.
    • Unicode operators now supported, including ∑ for sum, ∏ for prod, and √ for sqrt
    • Quadratic constraints supported in @addConstraint macro.
    • Quadratic objectives supported in @setObjective macro.
    • MathProgBase solver-independent interface replaces Ipopt-specific interface for nonlinear problems
      • Breaking change: IpoptOptions no longer supported to specify solver options, use m = Model(solver=IpoptSolver(options...)) instead.
    • New solver interfaces: ECOS, NLopt, and nonlinear support for MOSEK
    • New option to control whether the lazy constraint callback is executed at each node in the B&B tree or just when feasible solutions are found
    • Add support for semicontinuous and semi-integer variables for those solvers that support them.
    • Add support for index dependencies (for example, triangular indexing) in @defVar, @addConstraint, and @defExpr (for example, @defVar(m, x[i=1:10,j=i:10])).
      • This required some changes to the internal structure of JuMP containers, which may break code that explicitly stored JuMPDict objects.

    Version 0.5.8 (September 24, 2014)

    • Fix a bug with specifying solvers (affects Julia 0.2 only)

    Version 0.5.7 (September 5, 2014)

    • Fix a bug in printing models

    Version 0.5.6 (September 2, 2014)

    • Add support for semicontinuous and semi-integer variables for those solvers that support them.
      • Breaking change: Syntax for Variable() constructor has changed (use of this interface remains discouraged)
    • Update for breaking changes in MathProgBase

    Version 0.5.5 (July 6, 2014)

    • Fix bug with problem modification: adding variables that did not appear in existing constraints or objective.

    Version 0.5.4 (June 19, 2014)

    • Update for breaking change in MathProgBase which reduces loading times for using JuMP
    • Fix error when MIPs not solved to optimality

    Version 0.5.3 (May 21, 2014)

    • Update for breaking change in ReverseDiffSparse

    Version 0.5.2 (May 9, 2014)

    • Fix compatibility with Julia 0.3 prerelease

    Version 0.5.1 (May 5, 2014)

    • Fix a bug in coefficient handling inside lazy constraints and user cuts

    Version 0.5.0 (May 2, 2014)

    • Support for nonlinear optimization with exact, sparse second-order derivatives automatically computed. Ipopt is currently the only solver supported.
    • getValue for AffExpr and QuadExpr
    • Breaking change: getSolverModel replaced by getInternalModel, which returns the internal MathProgBase-level model
    • Groups of constraints can be specified with @addConstraint (see documentation for details). This is not a breaking change.
    • dot(::JuMPDict{Variable},::JuMPDict{Variable}) now returns the corresponding quadratic expression.

    Version 0.4.1 (March 24, 2014)

    • Fix bug where change in objective sense was ignored when re-solving a model.
    • Fix issue with handling zero coefficients in AffExpr.

    Version 0.4.0 (March 10, 2014)

    • Support for SOS1 and SOS2 constraints.
    • Solver-independent callback for user heuristics.
    • dot and sum implemented for JuMPDict objects. Now you can say @addConstraint(m, dot(a,x) <= b).
    • Developers: support for extensions to JuMP. See definition of Model in src/JuMP.jl for more details.
    • Option to construct the low-level model before optimizing.

    Version 0.3.2 (February 17, 2014)

    • Improved model printing
      • Preliminary support for IJulia output

    Version 0.3.1 (January 30, 2014)

    • Documentation updates
    • Support for MOSEK
    • CPLEXLink renamed to CPLEX

    Version 0.3.0 (January 21, 2014)

    • Unbounded/infeasibility rays: getValue() will return the corresponding components of an unbounded ray when a model is unbounded, if supported by the selected solver. getDual() will return an infeasibility ray (Farkas proof) if a model is infeasible and the selected solver supports this feature.
    • Solver-independent callbacks for user generated cuts.
    • Use new interface for solver-independent QCQP.
    • setlazycallback renamed to setLazyCallback for consistency.

    Version 0.2.0 (December 15, 2013)

    Breaking

    • Objective sense is specified in setObjective instead of in the Model constructor.
    • lpsolver and mipsolver merged into single solver option.

    Added

    • Problem modification with efficient LP restarts and MIP warm-starts.
    • Relatedly, column-wise modeling now supported.
    • Solver-independent callbacks supported. Currently we support only a "lazy constraint" callback, which works with Gurobi, CPLEX, and GLPK. More callbacks coming soon.

    Version 0.1.2 (November 16, 2013)

    • Bug fixes for printing, improved error messages.
    • Allow AffExpr to be used in macros; for example, ex = y + z; @addConstraint(m, x + 2*ex <= 3)

    Version 0.1.1 (October 23, 2013)

    • Update for solver specification API changes in MathProgBase.

    Version 0.1.0 (October 3, 2013)

    • Initial public release.
diff --git a/dev/developers/checklists/index.html b/dev/developers/checklists/index.html index 743798bba49..250dce95e64 100644 --- a/dev/developers/checklists/index.html +++ b/dev/developers/checklists/index.html @@ -69,4 +69,4 @@ - [ ] Implement `vectorize(data, ::NewShape)::Vector` - [ ] Implement `reshape_vector(vector, ::NewShape)` - [ ] Implement `dual_shape`, or verify that the shape is self-dual - - [ ] Add the tests from https://github.com/jump-dev/JuMP.jl/pull/3816 + - [ ] Add the tests from https://github.com/jump-dev/JuMP.jl/pull/3816 diff --git a/dev/developers/contributing/index.html b/dev/developers/contributing/index.html index 06f8881f965..a1c9a3ed1b3 100644 --- a/dev/developers/contributing/index.html +++ b/dev/developers/contributing/index.html @@ -25,4 +25,4 @@ $ git checkout master -$ git pull
Note

If you have suggestions to improve this guide, please make a pull request. It's particularly helpful if you do this after your first pull request because you'll know all the parts that could be explained better.

diff --git a/dev/developers/custom_solver_binaries/index.html b/dev/developers/custom_solver_binaries/index.html index 875b5fd8ee1..d2386e8c355 100644 --- a/dev/developers/custom_solver_binaries/index.html +++ b/dev/developers/custom_solver_binaries/index.html @@ -90,4 +90,4 @@ libCbc_path = "/usr/local/Cellar/cbc/2.10.5/lib/libCbc.3.10.5" libOsiCbc_path = "/usr/local/Cellar/cbc/2.10.5/lib/libOsiCbc.3.10.5" libcbcsolver_path = "/usr/local/Cellar/cbc/2.10.5/lib/libCbcSolver.3.10.5"
Info

Note that capitalization matters, so libcbcsolver_path corresponds to libCbcSolver.3.10.5.

Override entire artifact

To use the homebrew install as our custom binary we add the following to ~/.julia/artifacts/Overrides.toml:

# Override for Cbc_jll
-e481bc81db5e229ba1f52b2b4bd57484204b1b06 = "/usr/local/Cellar/cbc/2.10.5"
+e481bc81db5e229ba1f52b2b4bd57484204b1b06 = "/usr/local/Cellar/cbc/2.10.5" diff --git a/dev/developers/extensions/index.html b/dev/developers/extensions/index.html index cb21fd254ae..60b93162f2d 100644 --- a/dev/developers/extensions/index.html +++ b/dev/developers/extensions/index.html @@ -310,4 +310,4 @@ _function_barrier(names, model, F, S) end return names -end
Note

It is important to explicitly type the F and S arguments. If you leave them untyped, for example, function _function_barrier(names, model, F, S), Julia will not specialize the function calls and performance will not be improved.
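
A generic sketch of such a barrier (the body is illustrative rather than the exact code from this page; the point is the ::Type{F} and ::Type{S} annotations, which let Julia compile a specialized method for each concrete function-in-set pair):

using JuMP  # for all_constraints and name

function _function_barrier(names, model, ::Type{F}, ::Type{S}) where {F,S}
    # Specialized on the concrete F and S, so this loop is fast.
    for con in all_constraints(model, F, S)
        names[name(con)] = con
    end
    return names
end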

diff --git a/dev/developers/roadmap/index.html b/dev/developers/roadmap/index.html index ec78bdfa55e..4c0c3d5f873 100644 --- a/dev/developers/roadmap/index.html +++ b/dev/developers/roadmap/index.html @@ -3,4 +3,4 @@ function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-0RZ8X3D3D0', {'page_path': location.pathname + location.search + location.hash}); -

Development roadmap

The JuMP developers have compiled this roadmap document to share their plans and goals with the JuMP community. Contributions to roadmap issues are especially invited.

Most of these issues will require changes to both JuMP and MathOptInterface, and are non-trivial in their implementation. They are in no particular order, but represent broad themes that we see as areas in which JuMP could be improved.

  • Support nonlinear expressions with vector-valued inputs and outputs. There are a few related components:
    • Representing terms like log(det(X)) as necessary for Convex.jl
    • Automatic differentiation of terms with vector inputs and outputs
    • User-defined functions with vector (as opposed to scalar) inputs, which is particularly useful for optimal control problems
    • User-defined functions with vector outputs, avoiding the need for User-defined operators with vector outputs
  • Add support for modeling with SI units. The UnitJuMP.jl extension is a good proof of concept for what this would look like. We want to make units a first-class concept in JuMP. See #1350 for more details.

Completed

diff --git a/dev/developers/style/index.html b/dev/developers/style/index.html index e626d0eae3f..d506baa969c 100644 --- a/dev/developers/style/index.html +++ b/dev/developers/style/index.html @@ -182,4 +182,4 @@ end # module TestPkg -TestPkg.runtests()

Break the tests into multiple files, with one module per file, so that subsets of the codebase can be tested by calling include with the relevant file.

diff --git a/dev/extensions/DimensionalData/index.html b/dev/extensions/DimensionalData/index.html index bfde46ee14f..c0c06fe5845 100644 --- a/dev/extensions/DimensionalData/index.html +++ b/dev/extensions/DimensionalData/index.html @@ -48,4 +48,4 @@ ↓ j Categorical{String} ["a", "b"] ForwardOrdered └──────────────────────────────────────────────────────────────────────────────┘ "a" x[2,a] + x[3,a] + x[4,a] ≤ 1 - "b" x[2,b] + x[3,b] + x[4,b] ≤ 1

Documentation

See the DimensionalData.jl documentation for more details on the syntax and features of DimensionalData.DimArray.

diff --git a/dev/extensions/introduction/index.html b/dev/extensions/introduction/index.html index 51da3a16cc0..78f12164f2c 100644 --- a/dev/extensions/introduction/index.html +++ b/dev/extensions/introduction/index.html @@ -3,4 +3,4 @@ function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-0RZ8X3D3D0', {'page_path': location.pathname + location.search + location.hash}); -

Introduction

This section of the documentation contains brief documentation for some popular JuMP extensions. The list of extensions is not exhaustive, but instead is intended to help you discover popular JuMP extensions, and to give you an overview of the types of extensions that are possible to write with JuMP.

Affiliation

Packages beginning with jump-dev/ are developed and maintained by the JuMP developers.

Packages that do not begin with jump-dev/ are developed independently. The developers of these packages requested or consented to the inclusion of their README contents in the JuMP documentation for the benefit of users.

Adding new extensions

Written an extension? Add it to this section of the JuMP documentation by making a pull request to the docs/packages.toml file.

Weak dependencies

Some extensions listed in this section are implemented using the weak dependency feature added to Julia in v1.9. These extensions are activated if and only if you have JuMP and the other package loaded into your current scope with using or import.
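
For example, to activate the DimensionalData extension documented in this section, load both packages (a minimal sketch; either using or import works):

using JuMP
using DimensionalData  # loading both packages activates the JuMP extension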

Compat

Using a weak dependency requires Julia v1.9 or later.

diff --git a/dev/index.html b/dev/index.html index f0f4167530d..61e426401ee 100644 --- a/dev/index.html +++ b/dev/index.html @@ -32,4 +32,4 @@ journal = {Mathematical Programming Computation}, year = {2023}, doi = {10.1007/s12532-023-00239-3} -}

NumFOCUS

NumFOCUS logo

JuMP is a Sponsored Project of NumFOCUS, a 501(c)(3) nonprofit charity in the United States. NumFOCUS provides JuMP with fiscal, legal, and administrative support to help ensure the health and sustainability of the project. Visit numfocus.org for more information.

You can support JuMP by donating.

Donations to JuMP are managed by NumFOCUS. For donors in the United States, your gift is tax-deductible to the extent provided by law. As with any donation, you should consult with your tax adviser about your particular tax situation.

JuMP's largest expense is the annual JuMP-dev workshop. Donations will help us provide travel support for JuMP-dev attendees and take advantage of other opportunities that arise to support JuMP development.

License

JuMP is licensed under the MPL-2.0 software license. Consult the license and the Mozilla FAQ for more information. In addition, JuMP is typically used in conjunction with solver packages and extensions which have their own licences. Consult their package repositories for the specific licenses that apply.

diff --git a/dev/installation/index.html b/dev/installation/index.html index 650460d178c..812548eb5ab 100644 --- a/dev/installation/index.html +++ b/dev/installation/index.html @@ -28,4 +28,4 @@ [4076af6c] ↓ JuMP v0.21.5 ⇒ v0.18.6 [707a9f91] + JuMPeR v0.6.0 Updating `~/jump_example/Manifest.toml` - ... lines omitted ...

JuMPeR gets added at version 0.6.0 (+ JuMPeR v0.6.0), but JuMP gets downgraded from 0.21.5 to 0.18.6 (↓ JuMP v0.21.5 ⇒ v0.18.6)! The reason for this is that JuMPeR doesn't support a version of JuMP newer than 0.18.6.

Tip

Pay careful attention to the output of the package manager when adding new packages, especially when you see a package being downgraded.
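
One way to double-check what was installed is to query the package manager afterwards (a minimal sketch; the JuMPeR example above is assumed):

import Pkg

Pkg.add("JuMPeR")   # may downgrade JuMP, as shown above
Pkg.status("JuMP")  # confirm which JuMP version ended up in the manifest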

diff --git a/dev/manual/callbacks/index.html b/dev/manual/callbacks/index.html index 98ead8c9efd..e237b801660 100644 --- a/dev/manual/callbacks/index.html +++ b/dev/manual/callbacks/index.html @@ -84,4 +84,4 @@ end my_callback_function (generic function with 1 method) -julia> set_attribute(model, MOI.HeuristicCallback(), my_callback_function)

+julia> set_attribute(model, MOI.HeuristicCallback(), my_callback_function)

The third argument to submit is a vector of JuMP variables, and the fourth argument is a vector of values corresponding to each variable.

MOI.submit returns an enum that depends on whether the solver accepted the solution. The possible return codes are HEURISTIC_SOLUTION_ACCEPTED, HEURISTIC_SOLUTION_REJECTED, and HEURISTIC_SOLUTION_UNKNOWN.

Warning

Some solvers may accept partial solutions. Others require a feasible integer solution for every variable. If in doubt, provide a complete solution.

Info

The heuristic solution callback may be called at fractional nodes in the branch-and-bound tree. There is no guarantee that the callback is called at every fractional primal solution.
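
The following is a minimal sketch of a heuristic callback (the GLPK solver, the single integer variable x, and the rounding heuristic are illustrative assumptions, not part of the text above):

using JuMP, GLPK

model = Model(GLPK.Optimizer)
@variable(model, 0 <= x <= 10.5, Int)
@objective(model, Max, x)

function my_callback_function(cb_data)
    x_val = callback_value(cb_data, x)   # fractional value at the current node
    status = MOI.submit(
        model,
        MOI.HeuristicSolution(cb_data),
        [x],                    # third argument: a vector of JuMP variables
        [floor(Int, x_val)],    # fourth argument: a value for each variable
    )
    println("Heuristic solution status: ", status)
    return
end

set_attribute(model, MOI.HeuristicCallback(), my_callback_function)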

diff --git a/dev/manual/complex/index.html b/dev/manual/complex/index.html index 6ce0a70a563..72e5d601993 100644 --- a/dev/manual/complex/index.html +++ b/dev/manual/complex/index.html @@ -197,4 +197,4 @@ julia> @constraint(model, H in HermitianPSDCone()) [x[1] im - -im -x[2]] ∈ HermitianPSDCone()
+ -im -x[2]] ∈ HermitianPSDCone()
Note

The matrix H in H in HermitianPSDCone() must be a LinearAlgebra.Hermitian matrix type. A build_constraint error will be thrown if the matrix is a different matrix type.
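
For example, a minimal sketch (reusing the 2-by-2 matrix shown in the constraint output above) that satisfies this requirement:

using JuMP
import LinearAlgebra

model = Model()
@variable(model, x[1:2])
# Wrapping the matrix in LinearAlgebra.Hermitian avoids the build_constraint error.
H = LinearAlgebra.Hermitian([x[1] im; -im -x[2]])
@constraint(model, H in HermitianPSDCone())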

diff --git a/dev/manual/constraints/index.html b/dev/manual/constraints/index.html index 674d8653ff9..df0573e309e 100644 --- a/dev/manual/constraints/index.html +++ b/dev/manual/constraints/index.html @@ -801,4 +801,4 @@ (x[1] == x[2]) - 0.0 = 0 julia> @constraint(model, x[1] == x[2] := rhs) -x[1] == x[2] = false +x[1] == x[2] = false diff --git a/dev/manual/containers/index.html b/dev/manual/containers/index.html index 80a861672f9..762602c7dae 100644 --- a/dev/manual/containers/index.html +++ b/dev/manual/containers/index.html @@ -232,4 +232,4 @@ julia> Containers.@container([i = 1:2, j = 1:4; condition(i, j)], i + j) JuMP.Containers.SparseAxisArray{Int64, 2, Tuple{Int64, Int64}} with 2 entries: [1, 2] = 3 - [1, 4] = 5 + [1, 4] = 5 diff --git a/dev/manual/expressions/index.html b/dev/manual/expressions/index.html index 1ddf0985b4e..baa944db465 100644 --- a/dev/manual/expressions/index.html +++ b/dev/manual/expressions/index.html @@ -247,4 +247,4 @@ julia> x 2-element Vector{AffExpr}: 1.1 - 0

+ 0

Note that for large expressions this will be slower due to the allocation of additional temporary objects.

diff --git a/dev/manual/models/index.html b/dev/manual/models/index.html index a384c1bda57..034fe6f1c9d 100644 --- a/dev/manual/models/index.html +++ b/dev/manual/models/index.html @@ -313,4 +313,4 @@ If you expected the solver to support your problem, you may have an error in your formulation. Otherwise, consider using a different solver. The list of available solvers, along with the problem types they support, is available at https://jump.dev/JuMP.jl/stable/installation/#Supported-solvers. -Stacktrace:
+Stacktrace:
Warning

Another downside of direct mode is that the behavior of querying solution information after modifying the problem is solver-specific. This can lead to errors, or the solver silently returning an incorrect value. See OptimizeNotCalled errors for more information.
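
For reference, a minimal sketch of creating a model in direct mode (the choice of HiGHS as the solver is an assumption):

using JuMP, HiGHS

# direct_model connects JuMP to the solver's in-memory representation without a cache.
model = direct_model(HiGHS.Optimizer())
@variable(model, x >= 0)
@objective(model, Min, x)
optimize!(model)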

diff --git a/dev/manual/nlp/index.html b/dev/manual/nlp/index.html index 02767c3e46d..c1618eac73a 100644 --- a/dev/manual/nlp/index.html +++ b/dev/manual/nlp/index.html @@ -344,4 +344,4 @@ f1(x[1]) - 1.0 ≤ 0 f2(x[1], x[2]) - 1.0 ≤ 0 f3(x[2], x[3], x[4]) - 1.0 ≤ 0 - f4(x[1], x[3], x[4], x[5]) - 1.0 ≤ 0

+ f4(x[1], x[3], x[4], x[5]) - 1.0 ≤ 0

Known performance issues

The macro-based input to JuMP's nonlinear interface can cause a performance issue if you:

  1. write a macro with a large number (hundreds) of terms
  2. call that macro from within a function instead of from the top-level in global scope.

The first issue does not depend on the number of resulting terms in the mathematical expression, but rather on the number of terms in the Julia Expr representation of that expression. For example, the expression sum(x[i] for i in 1:1_000_000) contains one million mathematical terms, but the Expr representation is just a single sum.

The most common cause, other than a lot of tedious typing, is a program that automatically writes a JuMP model as a text file, which you later execute. One example is MINLPlib.jl, which automatically transpiled models in the GAMS scalar format into JuMP examples.

As a rule of thumb, if you are writing programs to automatically generate expressions for the JuMP macros, you should target the Raw expression input instead. For more information, read MathOptInterface Issue#1997.
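
As a hedged sketch of that advice (the sin terms, the bound, and the use of the legacy add_nonlinear_constraint function are illustrative), a large constraint can be assembled as a Julia Expr and passed to the raw expression input instead of being spliced into a macro call:

using JuMP

model = Model()
@variable(model, x[1:1_000])

# Build the Expr programmatically; it is never pasted into a macro call.
terms = [:(sin($(x[i]))) for i in 1:1_000]
expr = Expr(:call, :+, terms...)
add_nonlinear_constraint(model, :($expr <= 1.0))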

diff --git a/dev/manual/nonlinear/index.html b/dev/manual/nonlinear/index.html index e0713c0452e..d7b0129d56e 100644 --- a/dev/manual/nonlinear/index.html +++ b/dev/manual/nonlinear/index.html @@ -319,4 +319,4 @@ julia> ForwardDiff.gradient(x -> my_operator_good(x...), [1.0, 2.0]) 2-element Vector{Float64}: 2.0 - 4.0 + 4.0 diff --git a/dev/manual/objective/index.html b/dev/manual/objective/index.html index ad44b526583..8855e492051 100644 --- a/dev/manual/objective/index.html +++ b/dev/manual/objective/index.html @@ -179,4 +179,4 @@ 2 x[1] julia> @constraint(model, obj3 <= 2.0) -x[1] + x[2] ≤ 2 +x[1] + x[2] ≤ 2 diff --git a/dev/manual/solutions/index.html b/dev/manual/solutions/index.html index 1c848fa757d..c6bb55789d6 100644 --- a/dev/manual/solutions/index.html +++ b/dev/manual/solutions/index.html @@ -429,4 +429,4 @@ x integer => 0.1

You can also use the functional form, where the first argument is a function that maps variables to their primal values:

julia> optimize!(model)
 
 julia> primal_feasibility_report(v -> value(v), model)
-Dict{Any, Float64}()
+Dict{Any, Float64}() diff --git a/dev/manual/variables/index.html b/dev/manual/variables/index.html index 179a742e623..eda656eab62 100644 --- a/dev/manual/variables/index.html +++ b/dev/manual/variables/index.html @@ -639,4 +639,4 @@ p*x julia> typeof(px) -QuadExpr (alias for GenericQuadExpr{Float64, GenericVariableRef{Float64}})

+QuadExpr (alias for GenericQuadExpr{Float64, GenericVariableRef{Float64}})

When to use a parameter

Parameters are most useful when solving nonlinear models in a sequence:

julia> using JuMP, Ipopt
julia> model = Model(Ipopt.Optimizer);
julia> set_silent(model)
julia> @variable(model, x)
x

julia> @variable(model, p in Parameter(1.0))
p

julia> @objective(model, Min, (x - p)^2)
x² - 2 p*x + p²

julia> optimize!(model)

julia> value(x)
1.0

julia> set_parameter_value(p, 5.0)

julia> optimize!(model)

julia> value(x)
5.0

Using parameters can be faster than creating a new model from scratch with updated data because JuMP is able to avoid repeating a number of steps in processing the model before handing it off to the solver.

diff --git a/dev/moi/background/duality/index.html b/dev/moi/background/duality/index.html index f0dc1fe28cd..1884cd29f02 100644 --- a/dev/moi/background/duality/index.html +++ b/dev/moi/background/duality/index.html @@ -81,4 +81,4 @@ \max & \sum b_k y_k \\ \text{s.t.} \;\; & C+C^\top - \sum (A_k+A_k^\top) y_k \in \mathcal{S}_+ \\ & C-C^\top - \sum(A_k-A_k^\top) y_k = 0 -\end{align}\]

+\end{align}\]

and we recover $Z = X + X^\top$.

diff --git a/dev/moi/background/infeasibility_certificates/index.html b/dev/moi/background/infeasibility_certificates/index.html index e542f723091..9745598b998 100644 --- a/dev/moi/background/infeasibility_certificates/index.html +++ b/dev/moi/background/infeasibility_certificates/index.html @@ -29,4 +29,4 @@ \end{align}\]

and:

\[-\sum_{i=1}^m b_i^\top (y_i + \eta d_i) > -\sum_{i=1}^m b_i^\top y_i,\]

for any feasible dual solution $y$. The latter simplifies to $-\sum_{i=1}^m b_i^\top d_i > 0$. For a maximization problem, the inequality is $\sum_{i=1}^m b_i^\top d_i < 0$. (Note that these are the same inequality, modulo a - sign.)

If the solver has found a certificate of primal infeasibility:

Note

The choice of whether to scale the ray $d$ to have magnitude 1 is left to the solver.

Infeasibility certificates of variable bounds

Many linear solvers (for example, Gurobi) do not provide explicit access to the primal infeasibility certificate of a variable bound. However, given a set of linear constraints:

\[\begin{align} l_A \le A x \le u_A \\ l_x \le x \le u_x, -\end{align}\]

+\end{align}\]

the primal certificate of the variable bounds can be computed using the primal certificate associated with the affine constraints, $d$. (Note that $d$ will have one element for each row of the $A$ matrix, and that some or all of the elements in the vectors $l_A$ and $u_A$ may be $\pm \infty$. If both $l_A$ and $u_A$ are finite for some row, the corresponding element in $d$ must be 0.)

Given $d$, compute $\bar{d} = d^\top A$. If the bound is finite, a certificate for the lower variable bound of $x_i$ is $\max\{\bar{d}_i, 0\}$, and a certificate for the upper variable bound is $\min\{\bar{d}_i, 0\}$.
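
A small numerical sketch of this computation (the matrix, the certificate value, and the bounds are illustrative):

# One affine row x1 - x2 with certificate d = [2.0].
A = [1.0 -1.0]
d = [2.0]
dbar = A' * d                        # dbar = dᵀA, one entry per variable
lower_certificate = max.(dbar, 0.0)  # use where the lower bound is finite
upper_certificate = min.(dbar, 0.0)  # use where the upper bound is finite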

diff --git a/dev/moi/background/motivation/index.html b/dev/moi/background/motivation/index.html index d1478d7573c..88b6b66dedc 100644 --- a/dev/moi/background/motivation/index.html +++ b/dev/moi/background/motivation/index.html @@ -3,4 +3,4 @@ function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-0RZ8X3D3D0', {'page_path': location.pathname + location.search + location.hash}); -

+

Motivation

MathOptInterface (MOI) is a replacement for MathProgBase, the first-generation abstraction layer for mathematical optimization previously used by JuMP and Convex.jl.

To address a number of limitations of MathProgBase, MOI is designed to:

  • Be simple and extensible
    • unifying linear, quadratic, and conic optimization,
    • seamlessly facilitating extensions to essentially arbitrary constraints and functions (for example, indicator constraints, complementarity constraints, and piecewise-linear functions)
  • Be fast
    • by allowing access to a solver's in-memory representation of a problem without writing intermediate files (when possible)
    • by using multiple dispatch and avoiding requiring containers of non-concrete types
  • Allow a solver to return multiple results (for example, a pool of solutions)
  • Allow a solver to return extra arbitrary information via attributes (for example, variable- and constraint-wise membership in an irreducible inconsistent subset for infeasibility analysis)
  • Provide a greatly expanded set of status codes explaining what happened during the optimization procedure
  • Enable a solver to more precisely specify which problem classes it supports
  • Enable both primal and dual warm starts
  • Enable adding and removing both variables and constraints by indices that are not required to be consecutive
  • Enable any modification that the solver supports to an existing model
  • Avoid requiring the solver wrapper to store an additional copy of the problem data
diff --git a/dev/moi/background/naming_conventions/index.html b/dev/moi/background/naming_conventions/index.html index 56b01ca7762..cf7a14540c2 100644 --- a/dev/moi/background/naming_conventions/index.html +++ b/dev/moi/background/naming_conventions/index.html @@ -3,4 +3,4 @@ function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-0RZ8X3D3D0', {'page_path': location.pathname + location.search + location.hash}); -

+

Naming conventions

MOI follows several conventions for naming functions and structures. These should also be followed by packages extending MOI.

Sets

Sets encode the structure of constraints. Their names should follow the following conventions:

  • Abstract types in the set hierarchy should begin with Abstract and end in Set, for example, AbstractScalarSet, AbstractVectorSet.
  • Vector-valued conic sets should end with Cone, for example, NormInfinityCone, SecondOrderCone.
  • Vector-valued Cartesian products should be plural and not end in Cone, for example, Nonnegatives, not NonnegativeCone.
  • Matrix-valued conic sets should provide two representations: ConeSquare and ConeTriangle, for example, RootDetConeTriangle and RootDetConeSquare. See Matrix cones for more details.
  • Scalar sets should be singular, not plural, for example, Integer, not Integers.
  • As much as possible, the names should follow established conventions in the domain where this set is used: for instance, convex sets should have names close to those of CVX, and constraint-programming sets should follow MiniZinc's constraints.
diff --git a/dev/moi/changelog/index.html b/dev/moi/changelog/index.html index 332e164caf5..6d8d4bc070b 100644 --- a/dev/moi/changelog/index.html +++ b/dev/moi/changelog/index.html @@ -31,4 +31,4 @@ end write(path, s) end -end

+end

v0.9.22 (May 22, 2021)

This release contains backports from the ongoing development of the v0.10 release.

v0.9.21 (April 23, 2021)

v0.9.20 (February 20, 2021)

v0.9.19 (December 1, 2020)

v0.9.18 (November 3, 2020)

v0.9.17 (September 21, 2020)

v0.9.16 (September 17, 2020)

v0.9.15 (September 14, 2020)

v0.9.14 (May 30, 2020)

v0.9.13 (March 24, 2020)

v0.9.12 (February 28, 2020)

v0.9.11 (February 21, 2020)

v0.9.10 (January 31, 2020)

v0.9.9 (December 29, 2019)

v0.9.8 (December 19, 2019)

v0.9.7 (October 30, 2019)

v0.9.6 (October 25, 2019)

v0.9.5 (October 9, 2019)

v0.9.4 (October 2, 2019)

v0.9.3 (September 20, 2019)

v0.9.2 (September 5, 2019)

v0.9.1 (August 22, 2019)

v0.9.0 (August 13, 2019)

v0.8.4 (March 13, 2019)

v0.8.3 (March 6, 2019)

v0.8.2 (February 7, 2019)

v0.8.1 (January 7, 2019)

v0.8.0 (December 18, 2018)

v0.7.0 (December 13, 2018)

v0.6.4 (November 27, 2018)

v0.6.3 (November 16, 2018)

v0.6.2 (October 26, 2018)

v0.6.1 (September 22, 2018)

v0.6.0 (August 30, 2018)

v0.5.0 (August 5, 2018)

v0.4.1 (June 28, 2018)

v0.4.0 (June 23, 2018)

v0.3.0 (May 25, 2018)

v0.2.0 (April 24, 2018)

v0.1.0 (February 28, 2018)

diff --git a/dev/moi/developer/checklists/index.html b/dev/moi/developer/checklists/index.html index f14311bc119..10464323c31 100644 --- a/dev/moi/developer/checklists/index.html +++ b/dev/moi/developer/checklists/index.html @@ -112,4 +112,4 @@ ## Documentation - - [ ] The version fields are updated in `docs/src/submodules/FileFormats/overview.md` + - [ ] The version fields are updated in `docs/src/submodules/FileFormats/overview.md` diff --git a/dev/moi/index.html b/dev/moi/index.html index 24d354cba8a..e8d8d41c073 100644 --- a/dev/moi/index.html +++ b/dev/moi/index.html @@ -10,4 +10,4 @@ year={2021}, doi={10.1287/ijoc.2021.1067}, publisher={INFORMS} -}

+}

A preprint of this paper is freely available.

diff --git a/dev/moi/manual/constraints/index.html b/dev/moi/manual/constraints/index.html index 6b0fc680151..59d57fdca7b 100644 --- a/dev/moi/manual/constraints/index.html +++ b/dev/moi/manual/constraints/index.html @@ -23,4 +23,4 @@ false

Constraint attributes

The following attributes are available for constraints:

Get and set these attributes using get and set.

julia> MOI.set(model, MOI.ConstraintName(), c, "con_c")
 
 julia> MOI.get(model, MOI.ConstraintName(), c)
-"con_c"

+"con_c"

Constraints by function-set pairs

Below is a list of common constraint types and how they are represented as function-set pairs in MOI. In the notation below, $x$ is a vector of decision variables, $x_i$ is a scalar decision variable, $\alpha, \beta$ are scalar constants, $a, b$ are constant vectors, A is a constant matrix and $\mathbb{R}_+$ (resp. $\mathbb{R}_-$) is the set of non-negative (resp. non-positive) real numbers.

Linear constraints

Mathematical Constraint | MOI Function | MOI Set
$a^Tx \le \beta$ | ScalarAffineFunction | LessThan
$a^Tx \ge \alpha$ | ScalarAffineFunction | GreaterThan
$a^Tx = \beta$ | ScalarAffineFunction | EqualTo
$\alpha \le a^Tx \le \beta$ | ScalarAffineFunction | Interval
$x_i \le \beta$ | VariableIndex | LessThan
$x_i \ge \alpha$ | VariableIndex | GreaterThan
$x_i = \beta$ | VariableIndex | EqualTo
$\alpha \le x_i \le \beta$ | VariableIndex | Interval
$Ax + b \in \mathbb{R}_+^n$ | VectorAffineFunction | Nonnegatives
$Ax + b \in \mathbb{R}_-^n$ | VectorAffineFunction | Nonpositives
$Ax + b = 0$ | VectorAffineFunction | Zeros

By convention, solvers are not expected to support nonzero constant terms in the ScalarAffineFunctions of the first four rows of the preceding table because they are redundant with the parameters of the sets. For example, encode $2x + 1 \le 2$ as $2x \le 1$.
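
A minimal sketch of this convention using MOI directly (the Utilities.Model backend and the data are illustrative): the constraint 2x + 1 ≤ 2 is encoded with a zero constant in the function as 2x ≤ 1:

import MathOptInterface as MOI

model = MOI.Utilities.Model{Float64}()
x = MOI.add_variable(model)

# ScalarAffineFunction-in-LessThan with the constant moved into the set.
f = MOI.ScalarAffineFunction([MOI.ScalarAffineTerm(2.0, x)], 0.0)
MOI.add_constraint(model, f, MOI.LessThan(1.0))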

Constraints with VariableIndex in LessThan, GreaterThan, EqualTo, or Interval sets have a natural interpretation as variable bounds. As such, it is typically not natural to impose multiple lower- or upper-bounds on the same variable, and the solver interfaces will throw LowerBoundAlreadySet or UpperBoundAlreadySet, respectively.

Moreover, adding two VariableIndex constraints on the same variable with the same set is impossible, because they would share the same index, which is the index of the variable; see ConstraintIndex.

It is natural, however, to impose upper- and lower-bounds separately as two different constraints on a single variable. The difference between imposing bounds by using a single Interval constraint and by using separate LessThan and GreaterThan constraints is that the latter will allow the solver to return separate dual multipliers for the two bounds, while the former will allow the solver to return only a single dual for the interval constraint.
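
For example, a sketch of imposing the two bounds as separate constraints (the Utilities.Model backend and the bound values are illustrative):

import MathOptInterface as MOI

model = MOI.Utilities.Model{Float64}()
x = MOI.add_variable(model)

# Two VariableIndex constraints: the solver can report a dual for each bound.
c_lower = MOI.add_constraint(model, x, MOI.GreaterThan(0.0))
c_upper = MOI.add_constraint(model, x, MOI.LessThan(1.0))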

Conic constraints

Mathematical Constraint | MOI Function | MOI Set
$\lVert Ax + b\rVert_2 \le c^Tx + d$ | VectorAffineFunction | SecondOrderCone
$y \ge \lVert x \rVert_2$ | VectorOfVariables | SecondOrderCone
$2yz \ge \lVert x \rVert_2^2, y,z \ge 0$ | VectorOfVariables | RotatedSecondOrderCone
$(a_1^Tx + b_1,a_2^Tx + b_2,a_3^Tx + b_3) \in \mathcal{E}$ | VectorAffineFunction | ExponentialCone
$A(x) \in \mathcal{S}_+$ | VectorAffineFunction | PositiveSemidefiniteConeTriangle
$B(x) \in \mathcal{S}_+$ | VectorAffineFunction | PositiveSemidefiniteConeSquare
$x \in \mathcal{S}_+$ | VectorOfVariables | PositiveSemidefiniteConeTriangle
$x \in \mathcal{S}_+$ | VectorOfVariables | PositiveSemidefiniteConeSquare

where $\mathcal{E}$ is the exponential cone (see ExponentialCone), $\mathcal{S}_+$ is the set of positive semidefinite symmetric matrices, $A$ is an affine map that outputs symmetric matrices and $B$ is an affine map that outputs square matrices.

Quadratic constraints

Mathematical Constraint | MOI Function | MOI Set
$\frac{1}{2}x^TQx + a^Tx + b \ge 0$ | ScalarQuadraticFunction | GreaterThan
$\frac{1}{2}x^TQx + a^Tx + b \le 0$ | ScalarQuadraticFunction | LessThan
$\frac{1}{2}x^TQx + a^Tx + b = 0$ | ScalarQuadraticFunction | EqualTo
Bilinear matrix inequality | VectorQuadraticFunction | PositiveSemidefiniteCone...
Note

For more details on the internal format of the quadratic functions see ScalarQuadraticFunction or VectorQuadraticFunction.

Discrete and logical constraints

Mathematical Constraint | MOI Function | MOI Set
$x_i \in \mathbb{Z}$ | VariableIndex | Integer
$x_i \in \{0,1\}$ | VariableIndex | ZeroOne
$x_i \in \{0\} \cup [l,u]$ | VariableIndex | Semicontinuous
$x_i \in \{0\} \cup \{l,l+1,\ldots,u-1,u\}$ | VariableIndex | Semiinteger
At most one component of $x$ can be nonzero | VectorOfVariables | SOS1
At most two components of $x$ can be nonzero, and if so they must be adjacent components | VectorOfVariables | SOS2
$y = 1 \implies a^T x \in S$ | VectorAffineFunction | Indicator

JuMP mapping

The following bullet points show examples of how JuMP constraints are translated into MOI function-set pairs:

Variable bounds are handled in a similar fashion:

One notable difference is that a variable with an upper and lower bound is translated into two constraints, rather than an interval, that is:

diff --git a/dev/moi/manual/models/index.html b/dev/moi/manual/models/index.html index 94c11bece73..e9f4abe897a 100644 --- a/dev/moi/manual/models/index.html +++ b/dev/moi/manual/models/index.html @@ -3,4 +3,4 @@ function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-0RZ8X3D3D0', {'page_path': location.pathname + location.search + location.hash}); -

+

Models

The most significant part of MOI is the definition of the model API that is used to specify an instance of an optimization problem (for example, by adding variables and constraints). Objects that implement the model API must inherit from the ModelLike abstract type.

Notably missing from the model API is the method to solve an optimization problem. ModelLike objects may store an instance (for example, in memory or backed by a file format) without being linked to a particular solver. In addition to the model API, MOI defines AbstractOptimizer and provides methods to solve the model and interact with solutions. See the Solutions section for more details.

Info

Throughout the rest of the manual, model is used as a generic ModelLike, and optimizer is used as a generic AbstractOptimizer.

Tip

MOI does not export functions, but for brevity we often omit qualifying names with the MOI module. Best practice is to have

import MathOptInterface as MOI

and prefix all MOI methods with MOI. in user code. If a name is also available in base Julia, we always explicitly use the module prefix, for example, with MOI.get.

Attributes

Attributes are properties of the model that can be queried and modified. These include constants such as the number of variables in a model (NumberOfVariables), and properties of variables and constraints such as the name of a variable (VariableName).

There are four types of attributes: AbstractOptimizerAttribute, AbstractModelAttribute, AbstractVariableAttribute, and AbstractConstraintAttribute.

Some attributes are values that can be queried by the user but not modified, while other attributes can be modified by the user.

All interactions with attributes occur through the get and set functions.

Consult the docstrings of each attribute for information on what it represents.
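
A small sketch of the attribute interface (the Utilities.Model backend and the names are illustrative):

import MathOptInterface as MOI

model = MOI.Utilities.Model{Float64}()
x = MOI.add_variable(model)

MOI.set(model, MOI.VariableName(), x, "x")   # set a variable attribute
MOI.get(model, MOI.VariableName(), x)        # returns "x"
MOI.get(model, MOI.NumberOfVariables())      # returns 1, a model attribute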

ModelLike API

The following attributes are available:

AbstractOptimizer API

The following attributes are available:

diff --git a/dev/moi/manual/modification/index.html b/dev/moi/manual/modification/index.html index bea6bfd09fe..ca2169327ea 100644 --- a/dev/moi/manual/modification/index.html +++ b/dev/moi/manual/modification/index.html @@ -152,4 +152,4 @@ ); julia> MOI.get(model, MOI.ConstraintFunction(), c) ≈ new_f -true +true diff --git a/dev/moi/manual/solutions/index.html b/dev/moi/manual/solutions/index.html index 2fbd5398dcf..9f0c08d32a8 100644 --- a/dev/moi/manual/solutions/index.html +++ b/dev/moi/manual/solutions/index.html @@ -36,4 +36,4 @@ end rethrow(err) # Something else went wrong. Rethrow the error end -end +end diff --git a/dev/moi/manual/standard_form/index.html b/dev/moi/manual/standard_form/index.html index 524c6fcdcd5..ab330dd14ce 100644 --- a/dev/moi/manual/standard_form/index.html +++ b/dev/moi/manual/standard_form/index.html @@ -7,4 +7,4 @@ & \min_{x \in \mathbb{R}^n} & f_0(x) \\ & \;\;\text{s.t.} & f_i(x) & \in \mathcal{S}_i & i = 1 \ldots m -\end{align}\]

+\end{align}\]

where:

Tip

For more information on this standard form, read our paper.

MOI defines some commonly used functions and sets, but the interface is extensible to other sets recognized by the solver.

Functions

The function types implemented in MathOptInterface.jl are:

Function | Description
VariableIndex | $x_j$, the projection onto a single coordinate defined by a variable index $j$.
VectorOfVariables | The projection onto multiple coordinates (that is, extracting a sub-vector).
ScalarAffineFunction | $a^T x + b$, where $a$ is a vector and $b$ scalar.
ScalarNonlinearFunction | $f(x)$, where $f$ is a nonlinear function.
VectorAffineFunction | $A x + b$, where $A$ is a matrix and $b$ is a vector.
ScalarQuadraticFunction | $\frac{1}{2} x^T Q x + a^T x + b$, where $Q$ is a symmetric matrix, $a$ is a vector, and $b$ is a constant.
VectorQuadraticFunction | A vector of scalar-valued quadratic functions.
VectorNonlinearFunction | $f(x)$, where $f$ is a vector-valued nonlinear function.

Extensions for nonlinear programming are present but not yet well documented.

One-dimensional sets

The one-dimensional set types implemented in MathOptInterface.jl are:

Set | Description
LessThan(u) | $(-\infty, u]$
GreaterThan(l) | $[l, \infty)$
EqualTo(v) | $\{v\}$
Interval(l, u) | $[l, u]$
Integer() | $\mathbb{Z}$
ZeroOne() | $\{ 0, 1 \}$
Semicontinuous(l, u) | $\{ 0\} \cup [l, u]$
Semiinteger(l, u) | $\{ 0\} \cup \{l,l+1,\ldots,u-1,u\}$

Vector cones

The vector-valued set types implemented in MathOptInterface.jl are:

Set | Description
Reals(d) | $\mathbb{R}^{d}$
Zeros(d) | $0^{d}$
Nonnegatives(d) | $\{ x \in \mathbb{R}^{d} : x \ge 0 \}$
Nonpositives(d) | $\{ x \in \mathbb{R}^{d} : x \le 0 \}$
SecondOrderCone(d) | $\{ (t,x) \in \mathbb{R}^{d} : t \ge \lVert x \rVert_2 \}$
RotatedSecondOrderCone(d) | $\{ (t,u,x) \in \mathbb{R}^{d} : 2tu \ge \lVert x \rVert_2^2, t \ge 0,u \ge 0 \}$
ExponentialCone() | $\{ (x,y,z) \in \mathbb{R}^3 : y \exp (x/y) \le z, y > 0 \}$
DualExponentialCone() | $\{ (u,v,w) \in \mathbb{R}^3 : -u \exp (v/u) \le \exp(1) w, u < 0 \}$
GeometricMeanCone(d) | $\{ (t,x) \in \mathbb{R}^{1+n} : x \ge 0, t \le \sqrt[n]{x_1 x_2 \cdots x_n} \}$ where $n$ is $d - 1$
PowerCone(α) | $\{ (x,y,z) \in \mathbb{R}^3 : x^{\alpha} y^{1-\alpha} \ge |z|, x \ge 0,y \ge 0 \}$
DualPowerCone(α) | $\{ (u,v,w) \in \mathbb{R}^3 : \left(\frac{u}{\alpha}\right)^{\alpha}\left(\frac{v}{1-\alpha}\right)^{1-\alpha} \ge |w|, u,v \ge 0 \}$
NormOneCone(d) | $\{ (t,x) \in \mathbb{R}^{d} : t \ge \sum_i \lvert x_i \rvert \}$
NormInfinityCone(d) | $\{ (t,x) \in \mathbb{R}^{d} : t \ge \max_i \lvert x_i \rvert \}$
RelativeEntropyCone(d) | $\{ (u, v, w) \in \mathbb{R}^{d} : u \ge \sum_i w_i \log (\frac{w_i}{v_i}), v_i \ge 0, w_i \ge 0 \}$
HyperRectangle(l, u) | $\{x \in \bar{\mathbb{R}}^d: x_i \in [l_i, u_i] \forall i=1,\ldots,d\}$
NormCone(p, d) | $\{ (t,x) \in \mathbb{R}^{d} : t \ge \left(\sum\limits_i \lvert x_i \rvert^p\right)^{\frac{1}{p}} \}$

Matrix cones

The matrix-valued set types implemented in MathOptInterface.jl are:

Set | Description
RootDetConeTriangle(d) | $\{ (t,X) \in \mathbb{R}^{1+d(1+d)/2} : t \le \det(X)^{1/d}, X \mbox{ is the upper triangle of a PSD matrix} \}$
RootDetConeSquare(d) | $\{ (t,X) \in \mathbb{R}^{1+d^2} : t \le \det(X)^{1/d}, X \mbox{ is a PSD matrix} \}$
PositiveSemidefiniteConeTriangle(d) | $\{ X \in \mathbb{R}^{d(d+1)/2} : X \mbox{ is the upper triangle of a PSD matrix} \}$
PositiveSemidefiniteConeSquare(d) | $\{ X \in \mathbb{R}^{d^2} : X \mbox{ is a PSD matrix} \}$
LogDetConeTriangle(d) | $\{ (t,u,X) \in \mathbb{R}^{2+d(1+d)/2} : t \le u\log(\det(X/u)), X \mbox{ is the upper triangle of a PSD matrix}, u > 0 \}$
LogDetConeSquare(d) | $\{ (t,u,X) \in \mathbb{R}^{2+d^2} : t \le u \log(\det(X/u)), X \mbox{ is a PSD matrix}, u > 0 \}$
NormSpectralCone(r, c) | $\{ (t, X) \in \mathbb{R}^{1 + r \times c} : t \ge \sigma_1(X), X \mbox{ is a } r\times c\mbox{ matrix} \}$
NormNuclearCone(r, c) | $\{ (t, X) \in \mathbb{R}^{1 + r \times c} : t \ge \sum_i \sigma_i(X), X \mbox{ is a } r\times c\mbox{ matrix} \}$
HermitianPositiveSemidefiniteConeTriangle(d) | The cone of Hermitian positive semidefinite matrices, with side_dimension rows and columns.
Scaled(S) | The set S scaled so that Utilities.set_dot corresponds to LinearAlgebra.dot

Some of these cones can take two forms: XXXConeTriangle and XXXConeSquare.

In XXXConeTriangle sets, the matrix is assumed to be symmetric, and the elements are provided by a vector, in which the entries of the upper-right triangular part of the matrix are given column by column (or equivalently, the entries of the lower-left triangular part are given row by row).

In XXXConeSquare sets, the entries of the matrix are given column by column (or equivalently, row by row), and the matrix is constrained to be symmetric. As an example, given a 2-by-2 matrix of variables X and a one-dimensional variable t, we can specify a root-det constraint as [t, X11, X12, X22] ∈ RootDetConeTriangle or [t, X11, X12, X21, X22] ∈ RootDetConeSquare.

We provide both forms to enable flexibility for solvers who may natively support one or the other. Transformations between XXXConeTriangle and XXXConeSquare are handled by bridges, which removes the chance of conversion mistakes by users or solver developers.
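
A sketch of the two kinds of vector-valued constraints (the Utilities.Model backend and the dimensions are illustrative): a second-order cone constraint, and a 2-by-2 PSD constraint in triangle form whose vector is (X11, X12, X22):

import MathOptInterface as MOI

model = MOI.Utilities.Model{Float64}()

# t >= ||x||_2 as VectorOfVariables-in-SecondOrderCone(3).
t_and_x = MOI.add_variables(model, 3)
MOI.add_constraint(model, MOI.VectorOfVariables(t_and_x), MOI.SecondOrderCone(3))

# A 2-by-2 PSD constraint in triangle form: the vector is (X11, X12, X22).
X = MOI.add_variables(model, 3)
MOI.add_constraint(
    model,
    MOI.VectorOfVariables(X),
    MOI.PositiveSemidefiniteConeTriangle(2),
)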

Multi-dimensional sets with combinatorial structure

Other sets are vector-valued, with a particular combinatorial structure. Read their docstrings for more information on how to interpret them.

Set | Description
SOS1 | A Special Ordered Set (SOS) of Type I
SOS2 | A Special Ordered Set (SOS) of Type II
Indicator | A set to specify an indicator constraint
Complements | A set to specify a mixed complementarity constraint
AllDifferent | The all_different global constraint
BinPacking | The bin_packing global constraint
Circuit | The circuit global constraint
CountAtLeast | The at_least global constraint
CountBelongs | The nvalue global constraint
CountDistinct | The distinct global constraint
CountGreaterThan | The count_gt global constraint
Cumulative | The cumulative global constraint
Path | The path global constraint
Table | The table global constraint
diff --git a/dev/moi/manual/variables/index.html b/dev/moi/manual/variables/index.html index 02e141388c3..b7950e674d0 100644 --- a/dev/moi/manual/variables/index.html +++ b/dev/moi/manual/variables/index.html @@ -14,4 +14,4 @@ false
Warning

Not all ModelLike models support deleting variables. A DeleteNotAllowed error is thrown if this is not supported.
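
A defensive sketch (the Utilities.Model backend is illustrative) that tolerates models which do not support deletion:

import MathOptInterface as MOI

model = MOI.Utilities.Model{Float64}()
x = MOI.add_variable(model)

try
    MOI.delete(model, x)
catch err
    if err isa MOI.DeleteNotAllowed
        @warn "This model does not support deleting variables"
    else
        rethrow(err)
    end
end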

Variable attributes

The following attributes are available for variables:

Get and set these attributes using get and set.

julia> MOI.set(model, MOI.VariableName(), x, "var_x")
 
 julia> MOI.get(model, MOI.VariableName(), x)
-"var_x"
+"var_x" diff --git a/dev/moi/reference/callbacks/index.html b/dev/moi/reference/callbacks/index.html index 8e686718239..95cdb565041 100644 --- a/dev/moi/reference/callbacks/index.html +++ b/dev/moi/reference/callbacks/index.html @@ -33,4 +33,4 @@ MOI.submit(optimizer, MOI.HeuristicSolution(callback_data), x, values) end -endsource
+endsource
MathOptInterface.HeuristicSolutionType
HeuristicSolution(callback_data)

Heuristically obtained feasible solution. The solution is submitted as variables, values where values[i] gives the value of variables[i], similarly to set. The submit call returns a HeuristicSolutionStatus indicating whether the provided solution was accepted or rejected.

This can be submitted only from the HeuristicCallback. The field callback_data is a solver-specific callback type that is passed as the argument to the heuristic callback.

Some solvers require a complete solution, others only partial solutions.

source
MathOptInterface.HeuristicSolutionStatusType
HeuristicSolutionStatus

An Enum of possible return values for submit with HeuristicSolution. This informs whether the heuristic solution was accepted or rejected.

Values

Possible values are:

source
MathOptInterface.HEURISTIC_SOLUTION_ACCEPTEDConstant
HEURISTIC_SOLUTION_ACCEPTED::HeuristicSolutionStatus

An instance of the HeuristicSolutionStatus enum.

HEURISTIC_SOLUTION_ACCEPTED: The heuristic solution was accepted

source
MathOptInterface.HEURISTIC_SOLUTION_REJECTEDConstant
HEURISTIC_SOLUTION_REJECTED::HeuristicSolutionStatus

An instance of the HeuristicSolutionStatus enum.

HEURISTIC_SOLUTION_REJECTED: The heuristic solution was rejected

source
MathOptInterface.HEURISTIC_SOLUTION_UNKNOWNConstant
HEURISTIC_SOLUTION_UNKNOWN::HeuristicSolutionStatus

An instance of the HeuristicSolutionStatus enum.

HEURISTIC_SOLUTION_UNKNOWN: No information available on the acceptance

source
diff --git a/dev/moi/reference/constraints/index.html b/dev/moi/reference/constraints/index.html index 3867fe0bdc0..06a4c848894 100644 --- a/dev/moi/reference/constraints/index.html +++ b/dev/moi/reference/constraints/index.html @@ -44,4 +44,4 @@ model::ModelLike, ::Type{F}, ::Type{S}, -)::Bool where {F<:AbstractFunction,S<:AbstractSet}

Return a Bool indicating whether model supports F-in-S constraints, that is, copy_to(model, src) does not throw UnsupportedConstraint when src contains F-in-S constraints. If F-in-S constraints are only not supported in specific circumstances, for example, F-in-S constraints cannot be combined with another type of constraint, it should still return true.

source

Attributes

MathOptInterface.AbstractConstraintAttributeType
AbstractConstraintAttribute

Abstract supertype for attribute objects that can be used to set or get attributes (properties) of constraints in the model.

source
MathOptInterface.ConstraintNameType
ConstraintName()

A constraint attribute for a string identifying the constraint.

It is valid for constraints and variables to have the same name; however, constraints with duplicate names cannot be looked up using get, regardless of whether they have the same F-in-S type.

ConstraintName has a default value of "" if not set.

Notes

You should not implement ConstraintName for VariableIndex constraints.

source
MathOptInterface.ConstraintPrimalStartType
ConstraintPrimalStart()

A constraint attribute for the initial assignment to some constraint's ConstraintPrimal that the optimizer may use to warm-start the solve.

May be nothing (unset), a number for AbstractScalarFunction, or a vector for AbstractVectorFunction.

source
MathOptInterface.ConstraintDualStartType
ConstraintDualStart()

A constraint attribute for the initial assignment to some constraint's ConstraintDual that the optimizer may use to warm-start the solve.

May be nothing (unset), a number for AbstractScalarFunction, or a vector for AbstractVectorFunction.

source
MathOptInterface.ConstraintPrimalType
ConstraintPrimal(result_index::Int = 1)

A constraint attribute for the assignment to some constraint's primal value in result result_index.

If the constraint is f(x) in S, then in most cases the ConstraintPrimal is the value of f, evaluated at the corresponding VariablePrimal solution.

However, some conic solvers reformulate b - Ax in S to s = b - Ax, s in S. These solvers may return the value of s for ConstraintPrimal, rather than b - Ax. (Although these are constrained by an equality constraint, due to numerical tolerances they may not be identical.)

If the solver does not have a primal value for the constraint because the result_index is beyond the available solutions (whose number is indicated by the ResultCount attribute), getting this attribute must throw a ResultIndexBoundsError. Otherwise, if the result is unavailable for another reason (for instance, only a dual solution is available), the result is undefined. Users should first check PrimalStatus before accessing the ConstraintPrimal attribute.

If result_index is omitted, it is 1 by default. See ResultCount for information on how the results are ordered.

source
MathOptInterface.ConstraintDualType
ConstraintDual(result_index::Int = 1)

A constraint attribute for the assignment to some constraint's dual value in result result_index. If result_index is omitted, it is 1 by default.

If the solver does not have a dual value for the variable because the result_index is beyond the available solutions (whose number is indicated by the ResultCount attribute), getting this attribute must throw a ResultIndexBoundsError. Otherwise, if the result is unavailable for another reason (for instance, only a primal solution is available), the result is undefined. Users should first check DualStatus before accessing the ConstraintDual attribute.

See ResultCount for information on how the results are ordered.

source
MathOptInterface.ConstraintBasisStatusType
ConstraintBasisStatus(result_index::Int = 1)

A constraint attribute for the BasisStatusCode of some constraint in result result_index, with respect to an available optimal solution basis. If result_index is omitted, it is 1 by default.

If the solver does not have a basis status for the constraint because the result_index is beyond the available solutions (whose number is indicated by the ResultCount attribute), getting this attribute must throw a ResultIndexBoundsError. Otherwise, if the result is unavailable for another reason (for instance, only a dual solution is available), the result is undefined. Users should first check PrimalStatus before accessing the ConstraintBasisStatus attribute.

See ResultCount for information on how the results are ordered.

Notes

For the basis status of a variable, query VariableBasisStatus.

ConstraintBasisStatus does not apply to VariableIndex constraints. You can infer the basis status of a VariableIndex constraint by looking at the result of VariableBasisStatus.

source
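A sketch of the corresponding query, assuming optimizer is a simplex-based solver that has solved an LP, c is an affine constraint index, and x is a variable index:

if MOI.get(optimizer, MOI.PrimalStatus()) == MOI.FEASIBLE_POINT
    MOI.get(optimizer, MOI.ConstraintBasisStatus(), c)  # for example, MOI.NONBASIC
    MOI.get(optimizer, MOI.VariableBasisStatus(), x)    # use this for VariableIndex constraints
end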
MathOptInterface.ConstraintFunctionType
ConstraintFunction()

A constraint attribute for the AbstractFunction object used to define the constraint.

It is guaranteed to be equivalent but not necessarily identical to the function provided by the user.

source
MathOptInterface.CanonicalConstraintFunctionType
CanonicalConstraintFunction()

A constraint attribute for a canonical representation of the AbstractFunction object used to define the constraint.

Getting this attribute is guaranteed to return a function that is equivalent but not necessarily identical to the function provided by the user.

By default, MOI.get(model, MOI.CanonicalConstraintFunction(), ci) falls back to MOI.Utilities.canonical(MOI.get(model, MOI.ConstraintFunction(), ci)). However, if model knows that the constraint function is canonical, it can implement a specialized method that returns the function directly without calling Utilities.canonical. Therefore, the value returned cannot be assumed to be a copy of the function stored in model. Moreover, Utilities.Model checks with Utilities.is_canonical whether the function stored internally is already canonical and, if so, returns the stored function instead of a copy.

source
MathOptInterface.ConstraintSetType
ConstraintSet()

A constraint attribute for the AbstractSet object used to define the constraint.

source
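A minimal sketch that retrieves all three attributes from a Utilities.Model cache; the constraint 2x ≤ 1 is illustrative only:

import MathOptInterface as MOI
model = MOI.Utilities.Model{Float64}()
x = MOI.add_variable(model)
f = MOI.ScalarAffineFunction([MOI.ScalarAffineTerm(2.0, x)], 0.0)
c = MOI.add_constraint(model, f, MOI.LessThan(1.0))
MOI.get(model, MOI.ConstraintFunction(), c)           # equivalent to f
MOI.get(model, MOI.CanonicalConstraintFunction(), c)  # canonical form; may alias internal storage
MOI.get(model, MOI.ConstraintSet(), c)                # MOI.LessThan(1.0)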
MathOptInterface.BasisStatusCodeType
BasisStatusCode

An Enum of possible values for the ConstraintBasisStatus and VariableBasisStatus attributes, explaining the status of a given element with respect to an optimal solution basis.

Notes

  • NONBASIC_AT_LOWER and NONBASIC_AT_UPPER should be used only for constraints with the Interval set. In this case, they are necessary to distinguish which side of the constraint is active. One-sided constraints (for example, LessThan and GreaterThan) should use NONBASIC instead of the NONBASIC_AT_* values. This restriction does not apply to VariableBasisStatus, which should return NONBASIC_AT_* regardless of whether the alternative bound exists.

  • In linear programs, SUPER_BASIC occurs when a variable with no bounds is not in the basis.

Values

Possible values are:

source
MathOptInterface.BASICConstant
BASIC::BasisStatusCode

An instance of the BasisStatusCode enum.

BASIC: element is in the basis

source
MathOptInterface.NONBASICConstant
NONBASIC::BasisStatusCode

An instance of the BasisStatusCode enum.

NONBASIC: element is not in the basis

source
MathOptInterface.NONBASIC_AT_LOWERConstant
NONBASIC_AT_LOWER::BasisStatusCode

An instance of the BasisStatusCode enum.

NONBASIC_AT_LOWER: element is not in the basis and is at its lower bound

source
MathOptInterface.NONBASIC_AT_UPPERConstant
NONBASIC_AT_UPPER::BasisStatusCode

An instance of the BasisStatusCode enum.

NONBASIC_AT_UPPER: element is not in the basis and is at its upper bound

source
MathOptInterface.SUPER_BASICConstant
SUPER_BASIC::BasisStatusCode

An instance of the BasisStatusCode enum.

SUPER_BASIC: element is not in the basis but is also not at one of its bounds

source
diff --git a/dev/moi/reference/errors/index.html b/dev/moi/reference/errors/index.html

julia> throw(MOI.UnsupportedNonlinearOperator(:black_box))
ERROR: MathOptInterface.UnsupportedNonlinearOperator: The nonlinear operator `:black_box` is not supported by the model.
Stacktrace:
[...]

source

Note that setting the ConstraintFunction of a VariableIndex constraint is not allowed:

MathOptInterface.SettingVariableIndexNotAllowedType
SettingVariableIndexNotAllowed()

Error type that should be thrown when the user calls set to change the ConstraintFunction of a VariableIndex constraint.

source
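As a sketch of the rule, changing the set of a VariableIndex constraint is fine, but changing its function is not; model, x, and y are assumed to be a ModelLike (for example, a Utilities.Model cache) and two of its variables:

c = MOI.add_constraint(model, x, MOI.LessThan(1.0))
MOI.set(model, MOI.ConstraintSet(), c, MOI.LessThan(2.0))  # allowed: modify the set
MOI.set(model, MOI.ConstraintFunction(), c, y)             # errors: throws SettingVariableIndexNotAllowed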
diff --git a/dev/moi/reference/models/index.html b/dev/moi/reference/models/index.html

get(
    model::MOI.ModelLike,
    attr::MOI.AbstractConstraintAttribute,
    bridge::AbstractBridge,
)

Return the value of the attribute attr of the model model for the constraint bridged by bridge.

source
get(model::GenericModel, attr::MathOptInterface.AbstractOptimizerAttribute)

Return the value of the attribute attr from the model's MOI backend.

source
get(model::GenericModel, attr::MathOptInterface.AbstractModelAttribute)

Return the value of the attribute attr from the model's MOI backend.

source
get(optimizer::AbstractOptimizer, attr::AbstractOptimizerAttribute)

Return an attribute attr of the optimizer optimizer.

get(model::ModelLike, attr::AbstractModelAttribute)

Return an attribute attr of the model model.

get(model::ModelLike, attr::AbstractVariableAttribute, v::VariableIndex)

If the attribute attr is set for the variable v in the model model, return its value; otherwise, return nothing. If the attribute attr is not supported by model, an error should be thrown instead of returning nothing.

get(model::ModelLike, attr::AbstractVariableAttribute, v::Vector{VariableIndex})

Return a vector of attributes corresponding to each variable in the collection v in the model model.

get(model::ModelLike, attr::AbstractConstraintAttribute, c::ConstraintIndex)

If the attribute attr is set for the constraint c in the model model, return its value; otherwise, return nothing. If the attribute attr is not supported by model, an error should be thrown instead of returning nothing.
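A small sketch of the scalar and vectorized get methods listed here, using a Utilities.Model cache; the variable names are placeholders:

import MathOptInterface as MOI
model = MOI.Utilities.Model{Float64}()
x = MOI.add_variables(model, 2)
MOI.set(model, MOI.VariableName(), x[1], "x1")
MOI.set(model, MOI.VariableName(), x[2], "x2")
MOI.get(model, MOI.VariableName(), x[1])  # "x1"
MOI.get(model, MOI.VariableName(), x)     # ["x1", "x2"]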

get(
    model::ModelLike,
    attr::AbstractConstraintAttribute,
    c::Vector{ConstraintIndex{F,S}},
)

Return a vector of attributes corresponding to each constraint in the collection c in the model model.
MOI.get(model, MOI.RelativeGapTolerance())  # returns 1e-3
# ... and the relative gap of the obtained solution is smaller or equal to the
# tolerance
MOI.get(model, MOI.RelativeGap())  # should return something ≤ 1e-3
Warning

The mathematical definition of "relative gap", and its allowed range, are solver-dependent. Typically, solvers expect a value between 0.0 and 1.0.

source
MathOptInterface.AutomaticDifferentiationBackendType
AutomaticDifferentiationBackend() <: AbstractOptimizerAttribute

An AbstractOptimizerAttribute for setting the automatic differentiation backend used by the solver.

The value must be a subtype of Nonlinear.AbstractAutomaticDifferentiation.

source
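A hedged sketch; optimizer is assumed to be a nonlinear solver wrapper that supports this attribute (solvers built on the MOI.Nonlinear submodule typically do):

MOI.set(optimizer, MOI.AutomaticDifferentiationBackend(), MOI.Nonlinear.SparseReverseMode())
MOI.get(optimizer, MOI.AutomaticDifferentiationBackend())  # MOI.Nonlinear.SparseReverseMode()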

List of attributes useful for optimizers

MathOptInterface.TerminationStatusType
TerminationStatus()

A model attribute for the TerminationStatusCode explaining why the optimizer stopped.

source
MathOptInterface.TerminationStatusCodeType
TerminationStatusCode

An Enum of possible values for the TerminationStatus attribute. This attribute is meant to explain the reason why the optimizer stopped executing in the most recent call to optimize!.

Values

Possible values are:

  • OPTIMIZE_NOT_CALLED: The algorithm has not started.
  • OPTIMAL: The algorithm found a globally optimal solution.
  • INFEASIBLE: The algorithm concluded that no feasible solution exists.
  • DUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem. If, additionally, a feasible (primal) solution is known to exist, this status typically implies that the problem is unbounded, with some technical exceptions.
  • LOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, could not find directions for improvement, or otherwise completed its search without global guarantees.
  • LOCALLY_INFEASIBLE: The algorithm converged to an infeasible point or otherwise completed its search without finding a feasible solution, without guarantees that no feasible solution exists.
  • INFEASIBLE_OR_UNBOUNDED: The algorithm stopped because it decided that the problem is infeasible or unbounded; this occasionally happens during MIP presolve.
  • ALMOST_OPTIMAL: The algorithm found a globally optimal solution to relaxed tolerances.
  • ALMOST_INFEASIBLE: The algorithm concluded that no feasible solution exists within relaxed tolerances.
  • ALMOST_DUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem within relaxed tolerances.
  • ALMOST_LOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, or could not find directions for improvement within relaxed tolerances.
  • ITERATION_LIMIT: An iterative algorithm stopped after conducting the maximum number of iterations.
  • TIME_LIMIT: The algorithm stopped after a user-specified computation time.
  • NODE_LIMIT: A branch-and-bound algorithm stopped because it explored a maximum number of nodes in the branch-and-bound tree.
  • SOLUTION_LIMIT: The algorithm stopped because it found the required number of solutions. This is often used in MIPs to get the solver to return the first feasible solution it encounters.
  • MEMORY_LIMIT: The algorithm stopped because it ran out of memory.
  • OBJECTIVE_LIMIT: The algorithm stopped because it found a solution better than a minimum limit set by the user.
  • NORM_LIMIT: The algorithm stopped because the norm of an iterate became too large.
  • OTHER_LIMIT: The algorithm stopped due to a limit not covered by one of the _LIMIT_ statuses above.
  • SLOW_PROGRESS: The algorithm stopped because it was unable to continue making progress towards the solution.
  • NUMERICAL_ERROR: The algorithm stopped because it encountered unrecoverable numerical error.
  • INVALID_MODEL: The algorithm stopped because the model is invalid.
  • INVALID_OPTION: The algorithm stopped because it was provided an invalid option.
  • INTERRUPTED: The algorithm stopped because of an interrupt signal.
  • OTHER_ERROR: The algorithm stopped because of an error not covered by one of the statuses defined above.
source
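A common dispatch pattern on this status, sketched for an optimizer that has already been passed to optimize!; the branches shown are illustrative, not exhaustive:

status = MOI.get(optimizer, MOI.TerminationStatus())
if status == MOI.OPTIMAL
    # Safe to query ObjectiveValue, VariablePrimal, and so on.
elseif status in (MOI.INFEASIBLE, MOI.DUAL_INFEASIBLE)
    # Check PrimalStatus and DualStatus for infeasibility certificates.
else
    @warn("Solve stopped: $(MOI.get(optimizer, MOI.RawStatusString()))")
end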
MathOptInterface.OPTIMIZE_NOT_CALLEDConstant
OPTIMIZE_NOT_CALLED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

OPTIMIZE_NOT_CALLED: The algorithm has not started.

source
MathOptInterface.OPTIMALConstant
OPTIMAL::TerminationStatusCode

An instance of the TerminationStatusCode enum.

OPTIMAL: The algorithm found a globally optimal solution.

source
MathOptInterface.INFEASIBLEConstant
INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

INFEASIBLE: The algorithm concluded that no feasible solution exists.

source
MathOptInterface.DUAL_INFEASIBLEConstant
DUAL_INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

DUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem. If, additionally, a feasible (primal) solution is known to exist, this status typically implies that the problem is unbounded, with some technical exceptions.

source
MathOptInterface.LOCALLY_SOLVEDConstant
LOCALLY_SOLVED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

LOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, could not find directions for improvement, or otherwise completed its search without global guarantees.

source
MathOptInterface.LOCALLY_INFEASIBLEConstant
LOCALLY_INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

LOCALLY_INFEASIBLE: The algorithm converged to an infeasible point or otherwise completed its search without finding a feasible solution, without guarantees that no feasible solution exists.

source
MathOptInterface.INFEASIBLE_OR_UNBOUNDEDConstant
INFEASIBLE_OR_UNBOUNDED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

INFEASIBLE_OR_UNBOUNDED: The algorithm stopped because it decided that the problem is infeasible or unbounded; this occasionally happens during MIP presolve.

source
MathOptInterface.ALMOST_OPTIMALConstant
ALMOST_OPTIMAL::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ALMOST_OPTIMAL: The algorithm found a globally optimal solution to relaxed tolerances.

source
MathOptInterface.ALMOST_INFEASIBLEConstant
ALMOST_INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ALMOST_INFEASIBLE: The algorithm concluded that no feasible solution exists within relaxed tolerances.

source
MathOptInterface.ALMOST_DUAL_INFEASIBLEConstant
ALMOST_DUAL_INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ALMOST_DUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem within relaxed tolerances.

source
MathOptInterface.ALMOST_LOCALLY_SOLVEDConstant
ALMOST_LOCALLY_SOLVED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ALMOST_LOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, or could not find directions for improvement within relaxed tolerances.

source
MathOptInterface.ITERATION_LIMITConstant
ITERATION_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ITERATION_LIMIT: An iterative algorithm stopped after conducting the maximum number of iterations.

source
MathOptInterface.TIME_LIMITConstant
TIME_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

TIME_LIMIT: The algorithm stopped after a user-specified computation time.

source
MathOptInterface.NODE_LIMITConstant
NODE_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

NODE_LIMIT: A branch-and-bound algorithm stopped because it explored a maximum number of nodes in the branch-and-bound tree.

source
MathOptInterface.SOLUTION_LIMITConstant
SOLUTION_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

SOLUTION_LIMIT: The algorithm stopped because it found the required number of solutions. This is often used in MIPs to get the solver to return the first feasible solution it encounters.

source
MathOptInterface.MEMORY_LIMITConstant
MEMORY_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

MEMORY_LIMIT: The algorithm stopped because it ran out of memory.

source
MathOptInterface.OBJECTIVE_LIMITConstant
OBJECTIVE_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

OBJECTIVE_LIMIT: The algorithm stopped because it found a solution better than a minimum limit set by the user.

source
MathOptInterface.NORM_LIMITConstant
NORM_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

NORM_LIMIT: The algorithm stopped because the norm of an iterate became too large.

source
MathOptInterface.OTHER_LIMITConstant
OTHER_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

OTHER_LIMIT: The algorithm stopped due to a limit not covered by one of the _LIMIT_ statuses above.

source
MathOptInterface.SLOW_PROGRESSConstant
SLOW_PROGRESS::TerminationStatusCode

An instance of the TerminationStatusCode enum.

SLOW_PROGRESS: The algorithm stopped because it was unable to continue making progress towards the solution.

source
MathOptInterface.NUMERICAL_ERRORConstant
NUMERICAL_ERROR::TerminationStatusCode

An instance of the TerminationStatusCode enum.

NUMERICAL_ERROR: The algorithm stopped because it encountered unrecoverable numerical error.

source
MathOptInterface.INVALID_MODELConstant
INVALID_MODEL::TerminationStatusCode

An instance of the TerminationStatusCode enum.

INVALID_MODEL: The algorithm stopped because the model is invalid.

source
MathOptInterface.INVALID_OPTIONConstant
INVALID_OPTION::TerminationStatusCode

An instance of the TerminationStatusCode enum.

INVALID_OPTION: The algorithm stopped because it was provided an invalid option.

source
MathOptInterface.INTERRUPTEDConstant
INTERRUPTED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

INTERRUPTED: The algorithm stopped because of an interrupt signal.

source
MathOptInterface.OTHER_ERRORConstant
OTHER_ERROR::TerminationStatusCode

An instance of the TerminationStatusCode enum.

OTHER_ERROR: The algorithm stopped because of an error not covered by one of the statuses defined above.

source
MathOptInterface.PrimalStatusType
PrimalStatus(result_index::Int = 1)

A model attribute for the ResultStatusCode of the primal result result_index. If result_index is omitted, it defaults to 1.

See ResultCount for information on how the results are ordered.

If result_index is larger than the value of ResultCount then NO_SOLUTION is returned.

source
MathOptInterface.DualStatusType
DualStatus(result_index::Int = 1)

A model attribute for the ResultStatusCode of the dual result result_index. If result_index is omitted, it defaults to 1.

See ResultCount for information on how the results are ordered.

If result_index is larger than the value of ResultCount then NO_SOLUTION is returned.

source
MathOptInterface.RawStatusStringType
RawStatusString()

A model attribute for a solver specific string explaining why the optimizer stopped.

source
MathOptInterface.ResultCountType
ResultCount()

A model attribute for the number of results available.

Order of solutions

A number of attributes contain an index, result_index, which is used to refer to one of the available results. Thus, result_index must be an integer between 1 and the number of available results.

As a general rule, the first result (result_index=1) is the most important result (for example, an optimal solution or an infeasibility certificate). Other results will typically be alternate solutions that the solver found during the search for the first result.

If a (local) optimal solution is available, that is, TerminationStatus is OPTIMAL or LOCALLY_SOLVED, the first result must correspond to the (locally) optimal solution. Other results may be alternative optimal solutions, or they may be other suboptimal solutions; use ObjectiveValue to distinguish between them.

If a primal or dual infeasibility certificate is available, that is, TerminationStatus is INFEASIBLE or DUAL_INFEASIBLE and the corresponding PrimalStatus or DualStatus is INFEASIBILITY_CERTIFICATE, then the first result must be a certificate. Other results may be alternate certificates, or infeasible points.

source
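A sketch of iterating the available results in order, assuming optimizer has already been optimized:

for i in 1:MOI.get(optimizer, MOI.ResultCount())
    if MOI.get(optimizer, MOI.PrimalStatus(i)) == MOI.FEASIBLE_POINT
        println("result ", i, ": ", MOI.get(optimizer, MOI.ObjectiveValue(i)))
    end
end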
MathOptInterface.ObjectiveValueType
ObjectiveValue(result_index::Int = 1)

A model attribute for the objective value of the primal solution result_index.

If the solver does not have a primal value for the objective because the result_index is beyond the available solutions (whose number is indicated by the ResultCount attribute), getting this attribute must throw a ResultIndexBoundsError. Otherwise, if the result is unavailable for another reason (for instance, only a dual solution is available), the result is undefined. Users should first check PrimalStatus before accessing the ObjectiveValue attribute.

See ResultCount for information on how the results are ordered.

source
MathOptInterface.DualObjectiveValueType
DualObjectiveValue(result_index::Int = 1)

A model attribute for the value of the objective function of the dual problem for the result_indexth dual result.

If the solver does not have a dual value for the objective because the result_index is beyond the available solutions (whose number is indicated by the ResultCount attribute), getting this attribute must throw a ResultIndexBoundsError. Otherwise, if the result is unavailable for another reason (for instance, only a primal solution is available), the result is undefined. Users should first check DualStatus before accessing the DualObjectiveValue attribute.

See ResultCount for information on how the results are ordered.

source
MathOptInterface.ObjectiveBoundType
ObjectiveBound()

A model attribute for the best known bound on the optimal objective value.

source
MathOptInterface.RelativeGapType
RelativeGap()

A model attribute for the final relative optimality gap.

Warning

The definition of this gap is solver-dependent. However, most solvers implementing this attribute define the relative gap as some variation of $\frac{|b-f|}{|f|}$, where $b$ is the best bound and $f$ is the best feasible objective value.

source
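For intuition only, one common variant of the gap can be recomputed from the attributes above; the exact formula a given solver reports may differ:

f = MOI.get(optimizer, MOI.ObjectiveValue())
b = MOI.get(optimizer, MOI.ObjectiveBound())
gap = abs(b - f) / abs(f)  # compare with MOI.get(optimizer, MOI.RelativeGap())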
MathOptInterface.SolveTimeSecType
SolveTimeSec()

A model attribute for the total elapsed solution time (in seconds) as reported by the optimizer.

source
MathOptInterface.SimplexIterationsType
SimplexIterations()

A model attribute for the cumulative number of simplex iterations during the optimization process.

For a mixed-integer program (MIP), the return value is the total simplex iterations for all nodes.

source
MathOptInterface.BarrierIterationsType
BarrierIterations()

A model attribute for the cumulative number of barrier iterations while solving a problem.

source
MathOptInterface.NodeCountType
NodeCount()

A model attribute for the total number of branch-and-bound nodes explored while solving a mixed-integer program (MIP).

source
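A sketch of collecting these solve statistics after a call to optimize!; not every solver implements every attribute, so treat each query as optional:

stats = Dict(
    "time"               => MOI.get(optimizer, MOI.SolveTimeSec()),
    "simplex_iterations" => MOI.get(optimizer, MOI.SimplexIterations()),
    "barrier_iterations" => MOI.get(optimizer, MOI.BarrierIterations()),
    "nodes"              => MOI.get(optimizer, MOI.NodeCount()),
)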

ResultStatusCode

MathOptInterface.ResultStatusCodeType
ResultStatusCode

An Enum of possible values for the PrimalStatus and DualStatus attributes.

The values indicate how to interpret the result vector.

Values

Possible values are:

  • NO_SOLUTION: the result vector is empty.
  • FEASIBLE_POINT: the result vector is a feasible point.
  • NEARLY_FEASIBLE_POINT: the result vector is feasible if some constraint tolerances are relaxed.
  • INFEASIBLE_POINT: the result vector is an infeasible point.
  • INFEASIBILITY_CERTIFICATE: the result vector is an infeasibility certificate. If the PrimalStatus is INFEASIBILITY_CERTIFICATE, then the primal result vector is a certificate of dual infeasibility. If the DualStatus is INFEASIBILITY_CERTIFICATE, then the dual result vector is a proof of primal infeasibility.
  • NEARLY_INFEASIBILITY_CERTIFICATE: the result satisfies a relaxed criterion for a certificate of infeasibility.
  • REDUCTION_CERTIFICATE: the result vector is an ill-posed certificate; see this article for details. If the PrimalStatus is REDUCTION_CERTIFICATE, then the primal result vector is a proof that the dual problem is ill-posed. If the DualStatus is REDUCTION_CERTIFICATE, then the dual result vector is a proof that the primal is ill-posed.
  • NEARLY_REDUCTION_CERTIFICATE: the result satisfies a relaxed criterion for an ill-posed certificate.
  • UNKNOWN_RESULT_STATUS: the result vector contains a solution with an unknown interpretation.
  • OTHER_RESULT_STATUS: the result vector contains a solution with an interpretation not covered by one of the statuses defined above
source
MathOptInterface.NO_SOLUTIONConstant
NO_SOLUTION::ResultStatusCode

An instance of the ResultStatusCode enum.

NO_SOLUTION: the result vector is empty.

source
MathOptInterface.FEASIBLE_POINTConstant
FEASIBLE_POINT::ResultStatusCode

An instance of the ResultStatusCode enum.

FEASIBLE_POINT: the result vector is a feasible point.

source
MathOptInterface.NEARLY_FEASIBLE_POINTConstant
NEARLY_FEASIBLE_POINT::ResultStatusCode

An instance of the ResultStatusCode enum.

NEARLY_FEASIBLE_POINT: the result vector is feasible if some constraint tolerances are relaxed.

source
MathOptInterface.INFEASIBLE_POINTConstant
INFEASIBLE_POINT::ResultStatusCode

An instance of the ResultStatusCode enum.

INFEASIBLE_POINT: the result vector is an infeasible point.

source
MathOptInterface.INFEASIBILITY_CERTIFICATEConstant
INFEASIBILITY_CERTIFICATE::ResultStatusCode

An instance of the ResultStatusCode enum.

INFEASIBILITY_CERTIFICATE: the result vector is an infeasibility certificate. If the PrimalStatus is INFEASIBILITY_CERTIFICATE, then the primal result vector is a certificate of dual infeasibility. If the DualStatus is INFEASIBILITY_CERTIFICATE, then the dual result vector is a proof of primal infeasibility.

source
MathOptInterface.NEARLY_INFEASIBILITY_CERTIFICATEConstant
NEARLY_INFEASIBILITY_CERTIFICATE::ResultStatusCode

An instance of the ResultStatusCode enum.

NEARLY_INFEASIBILITY_CERTIFICATE: the result satisfies a relaxed criterion for a certificate of infeasibility.

source
MathOptInterface.REDUCTION_CERTIFICATEConstant
REDUCTION_CERTIFICATE::ResultStatusCode

An instance of the ResultStatusCode enum.

REDUCTION_CERTIFICATE: the result vector is an ill-posed certificate; see this article for details. If the PrimalStatus is REDUCTION_CERTIFICATE, then the primal result vector is a proof that the dual problem is ill-posed. If the DualStatus is REDUCTION_CERTIFICATE, then the dual result vector is a proof that the primal is ill-posed.

source
MathOptInterface.NEARLY_REDUCTION_CERTIFICATEConstant
NEARLY_REDUCTION_CERTIFICATE::ResultStatusCode

An instance of the ResultStatusCode enum.

NEARLY_REDUCTION_CERTIFICATE: the result satisfies a relaxed criterion for an ill-posed certificate.

source
MathOptInterface.UNKNOWN_RESULT_STATUSConstant
UNKNOWN_RESULT_STATUS::ResultStatusCode

An instance of the ResultStatusCode enum.

UNKNOWN_RESULT_STATUS: the result vector contains a solution with an unknown interpretation.

source
MathOptInterface.OTHER_RESULT_STATUSConstant
OTHER_RESULT_STATUS::ResultStatusCode

An instance of the ResultStatusCode enum.

OTHER_RESULT_STATUS: the result vector contains a solution with an interpretation not covered by one of the statuses defined above

source

Conflict Status

MathOptInterface.compute_conflict!Function
compute_conflict!(optimizer::AbstractOptimizer)

Computes a minimal subset of constraints such that the model with the other constraints removed is still infeasible.

Some solvers call a set of conflicting constraints an Irreducible Inconsistent Subsystem (IIS).

See also ConflictStatus and ConstraintConflictStatus.

Note

If the model is modified after a call to compute_conflict!, the implementation is not obliged to purge the conflict: subsequent calls to the conflict attributes may return values for the original conflict without a warning. Equally, the implementation may discard the conflict when the model is modified.

source
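A hedged sketch of a typical conflict (IIS) workflow, assuming optimizer supports conflict computation (for example, the Gurobi and CPLEX wrappers do):

MOI.optimize!(optimizer)
if MOI.get(optimizer, MOI.TerminationStatus()) == MOI.INFEASIBLE
    MOI.compute_conflict!(optimizer)
    if MOI.get(optimizer, MOI.ConflictStatus()) == MOI.CONFLICT_FOUND
        for (F, S) in MOI.get(optimizer, MOI.ListOfConstraintTypesPresent())
            for ci in MOI.get(optimizer, MOI.ListOfConstraintIndices{F,S}())
                if MOI.get(optimizer, MOI.ConstraintConflictStatus(), ci) == MOI.IN_CONFLICT
                    println(ci)  # this constraint participates in the conflict
                end
            end
        end
    end
end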
MathOptInterface.ConflictStatusType
ConflictStatus()

A model attribute for the ConflictStatusCode explaining why the conflict refiner stopped when computing the conflict.

source
MathOptInterface.ConstraintConflictStatusType
ConstraintConflictStatus()

A constraint attribute indicating whether the constraint participates in the conflict. Its type is ConflictParticipationStatusCode.

source
MathOptInterface.ConflictStatusCodeType
ConflictStatusCode

An Enum of possible values for the ConflictStatus attribute. This attribute is meant to explain the reason why the conflict finder stopped executing in the most recent call to compute_conflict!.

Possible values are:

  • COMPUTE_CONFLICT_NOT_CALLED: the function compute_conflict! has not yet been called
  • NO_CONFLICT_EXISTS: there is no conflict because the problem is feasible
  • NO_CONFLICT_FOUND: the solver could not find a conflict
  • CONFLICT_FOUND: at least one conflict could be found
source
MathOptInterface.ConflictParticipationStatusCodeType
ConflictParticipationStatusCode

An Enum of possible values for the ConstraintConflictStatus attribute. This attribute is meant to indicate whether a given constraint participates or not in the last computed conflict.

Values

Possible values are:

  • NOT_IN_CONFLICT: the constraint does not participate in the conflict
  • IN_CONFLICT: the constraint participates in the conflict
  • MAYBE_IN_CONFLICT: the constraint may participate in the conflict, the solver was not able to prove that the constraint can be excluded from the conflict
source
MathOptInterface.NOT_IN_CONFLICTConstant
NOT_IN_CONFLICT::ConflictParticipationStatusCode

An instance of the ConflictParticipationStatusCode enum.

NOT_IN_CONFLICT: the constraint does not participate in the conflict

source
MathOptInterface.IN_CONFLICTConstant
IN_CONFLICT::ConflictParticipationStatusCode

An instance of the ConflictParticipationStatusCode enum.

IN_CONFLICT: the constraint participates in the conflict

source
MathOptInterface.MAYBE_IN_CONFLICTConstant
MAYBE_IN_CONFLICT::ConflictParticipationStatusCode

An instance of the ConflictParticipationStatusCode enum.

MAYBE_IN_CONFLICT: the constraint may participate in the conflict, the solver was not able to prove that the constraint can be excluded from the conflict

source
+MOI.get(model, MOI.RelativeGap()) # should return something ≤ 1e-3
Warning

The mathematical definition of "relative gap", and its allowed range, are solver-dependent. Typically, solvers expect a value between 0.0 and 1.0.

source
MathOptInterface.AutomaticDifferentiationBackendType
AutomaticDifferentiationBackend() <: AbstractOptimizerAttribute

An AbstractOptimizerAttribute for setting the automatic differentiation backend used by the solver.

The value must be a subtype of Nonlinear.AbstractAutomaticDifferentiation.

source

List of attributes useful for optimizers

MathOptInterface.TerminationStatusType
TerminationStatus()

A model attribute for the TerminationStatusCode explaining why the optimizer stopped.

source
MathOptInterface.TerminationStatusCodeType
TerminationStatusCode

An Enum of possible values for the TerminationStatus attribute. This attribute is meant to explain the reason why the optimizer stopped executing in the most recent call to optimize!.

Values

Possible values are:

  • OPTIMIZE_NOT_CALLED: The algorithm has not started.
  • OPTIMAL: The algorithm found a globally optimal solution.
  • INFEASIBLE: The algorithm concluded that no feasible solution exists.
  • DUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem. If, additionally, a feasible (primal) solution is known to exist, this status typically implies that the problem is unbounded, with some technical exceptions.
  • LOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, could not find directions for improvement, or otherwise completed its search without global guarantees.
  • LOCALLY_INFEASIBLE: The algorithm converged to an infeasible point or otherwise completed its search without finding a feasible solution, without guarantees that no feasible solution exists.
  • INFEASIBLE_OR_UNBOUNDED: The algorithm stopped because it decided that the problem is infeasible or unbounded; this occasionally happens during MIP presolve.
  • ALMOST_OPTIMAL: The algorithm found a globally optimal solution to relaxed tolerances.
  • ALMOST_INFEASIBLE: The algorithm concluded that no feasible solution exists within relaxed tolerances.
  • ALMOST_DUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem within relaxed tolerances.
  • ALMOST_LOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, or could not find directions for improvement within relaxed tolerances.
  • ITERATION_LIMIT: An iterative algorithm stopped after conducting the maximum number of iterations.
  • TIME_LIMIT: The algorithm stopped after a user-specified computation time.
  • NODE_LIMIT: A branch-and-bound algorithm stopped because it explored a maximum number of nodes in the branch-and-bound tree.
  • SOLUTION_LIMIT: The algorithm stopped because it found the required number of solutions. This is often used in MIPs to get the solver to return the first feasible solution it encounters.
  • MEMORY_LIMIT: The algorithm stopped because it ran out of memory.
  • OBJECTIVE_LIMIT: The algorithm stopped because it found a solution better than a minimum limit set by the user.
  • NORM_LIMIT: The algorithm stopped because the norm of an iterate became too large.
  • OTHER_LIMIT: The algorithm stopped due to a limit not covered by one of the _LIMIT_ statuses above.
  • SLOW_PROGRESS: The algorithm stopped because it was unable to continue making progress towards the solution.
  • NUMERICAL_ERROR: The algorithm stopped because it encountered unrecoverable numerical error.
  • INVALID_MODEL: The algorithm stopped because the model is invalid.
  • INVALID_OPTION: The algorithm stopped because it was provided an invalid option.
  • INTERRUPTED: The algorithm stopped because of an interrupt signal.
  • OTHER_ERROR: The algorithm stopped because of an error not covered by one of the statuses defined above.
source
MathOptInterface.OPTIMIZE_NOT_CALLEDConstant
OPTIMIZE_NOT_CALLED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

OPTIMIZE_NOT_CALLED: The algorithm has not started.

source
MathOptInterface.OPTIMALConstant
OPTIMAL::TerminationStatusCode

An instance of the TerminationStatusCode enum.

OPTIMAL: The algorithm found a globally optimal solution.

source
MathOptInterface.INFEASIBLEConstant
INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

INFEASIBLE: The algorithm concluded that no feasible solution exists.

source
MathOptInterface.DUAL_INFEASIBLEConstant
DUAL_INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

DUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem. If, additionally, a feasible (primal) solution is known to exist, this status typically implies that the problem is unbounded, with some technical exceptions.

source
MathOptInterface.LOCALLY_SOLVEDConstant
LOCALLY_SOLVED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

LOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, could not find directions for improvement, or otherwise completed its search without global guarantees.

source
MathOptInterface.LOCALLY_INFEASIBLEConstant
LOCALLY_INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

LOCALLY_INFEASIBLE: The algorithm converged to an infeasible point or otherwise completed its search without finding a feasible solution, without guarantees that no feasible solution exists.

source
MathOptInterface.INFEASIBLE_OR_UNBOUNDEDConstant
INFEASIBLE_OR_UNBOUNDED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

INFEASIBLE_OR_UNBOUNDED: The algorithm stopped because it decided that the problem is infeasible or unbounded; this occasionally happens during MIP presolve.

source
MathOptInterface.ALMOST_OPTIMALConstant
ALMOST_OPTIMAL::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ALMOST_OPTIMAL: The algorithm found a globally optimal solution to relaxed tolerances.

source
MathOptInterface.ALMOST_INFEASIBLEConstant
ALMOST_INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ALMOST_INFEASIBLE: The algorithm concluded that no feasible solution exists within relaxed tolerances.

source
MathOptInterface.ALMOST_DUAL_INFEASIBLEConstant
ALMOST_DUAL_INFEASIBLE::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ALMOST_DUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem within relaxed tolerances.

source
MathOptInterface.ALMOST_LOCALLY_SOLVEDConstant
ALMOST_LOCALLY_SOLVED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ALMOST_LOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, or could not find directions for improvement within relaxed tolerances.

source
MathOptInterface.ITERATION_LIMITConstant
ITERATION_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

ITERATION_LIMIT: An iterative algorithm stopped after conducting the maximum number of iterations.

source
MathOptInterface.TIME_LIMITConstant
TIME_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

TIME_LIMIT: The algorithm stopped after a user-specified computation time.

source
MathOptInterface.NODE_LIMITConstant
NODE_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

NODE_LIMIT: A branch-and-bound algorithm stopped because it explored a maximum number of nodes in the branch-and-bound tree.

source
MathOptInterface.SOLUTION_LIMITConstant
SOLUTION_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

SOLUTION_LIMIT: The algorithm stopped because it found the required number of solutions. This is often used in MIPs to get the solver to return the first feasible solution it encounters.

source
MathOptInterface.MEMORY_LIMITConstant
MEMORY_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

MEMORY_LIMIT: The algorithm stopped because it ran out of memory.

source
MathOptInterface.OBJECTIVE_LIMITConstant
OBJECTIVE_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

OBJECTIVE_LIMIT: The algorithm stopped because it found a solution better than a minimum limit set by the user.

source
MathOptInterface.NORM_LIMITConstant
NORM_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

NORM_LIMIT: The algorithm stopped because the norm of an iterate became too large.

source
MathOptInterface.OTHER_LIMITConstant
OTHER_LIMIT::TerminationStatusCode

An instance of the TerminationStatusCode enum.

OTHER_LIMIT: The algorithm stopped due to a limit not covered by one of the _LIMIT_ statuses above.

source
MathOptInterface.SLOW_PROGRESSConstant
SLOW_PROGRESS::TerminationStatusCode

An instance of the TerminationStatusCode enum.

SLOW_PROGRESS: The algorithm stopped because it was unable to continue making progress towards the solution.

source
MathOptInterface.NUMERICAL_ERRORConstant
NUMERICAL_ERROR::TerminationStatusCode

An instance of the TerminationStatusCode enum.

NUMERICAL_ERROR: The algorithm stopped because it encountered unrecoverable numerical error.

source
MathOptInterface.INVALID_MODELConstant
INVALID_MODEL::TerminationStatusCode

An instance of the TerminationStatusCode enum.

INVALID_MODEL: The algorithm stopped because the model is invalid.

source
MathOptInterface.INVALID_OPTIONConstant
INVALID_OPTION::TerminationStatusCode

An instance of the TerminationStatusCode enum.

INVALID_OPTION: The algorithm stopped because it was provided an invalid option.

source
MathOptInterface.INTERRUPTEDConstant
INTERRUPTED::TerminationStatusCode

An instance of the TerminationStatusCode enum.

INTERRUPTED: The algorithm stopped because of an interrupt signal.

source
MathOptInterface.OTHER_ERRORConstant
OTHER_ERROR::TerminationStatusCode

An instance of the TerminationStatusCode enum.

OTHER_ERROR: The algorithm stopped because of an error not covered by one of the statuses defined above.

source
MathOptInterface.PrimalStatusType
PrimalStatus(result_index::Int = 1)

A model attribute for the ResultStatusCode of the primal result result_index. If result_index is omitted, it defaults to 1.

See ResultCount for information on how the results are ordered.

If result_index is larger than the value of ResultCount then NO_SOLUTION is returned.

source
MathOptInterface.DualStatusType
DualStatus(result_index::Int = 1)

A model attribute for the ResultStatusCode of the dual result result_index. If result_index is omitted, it defaults to 1.

See ResultCount for information on how the results are ordered.

If result_index is larger than the value of ResultCount then NO_SOLUTION is returned.

source
MathOptInterface.RawStatusStringType
RawStatusString()

A model attribute for a solver specific string explaining why the optimizer stopped.

source
MathOptInterface.ResultCountType
ResultCount()

A model attribute for the number of results available.

Order of solutions

A number of attributes contain an index, result_index, which is used to refer to one of the available results. Thus, result_index must be an integer between 1 and the number of available results.

As a general rule, the first result (result_index=1) is the most important result (for example, an optimal solution or an infeasibility certificate). Other results will typically be alternate solutions that the solver found during the search for the first result.

If a (local) optimal solution is available, that is, TerminationStatus is OPTIMAL or LOCALLY_SOLVED, the first result must correspond to the (locally) optimal solution. Other results may be alternative optimal solutions, or they may be other suboptimal solutions; use ObjectiveValue to distinguish between them.

If a primal or dual infeasibility certificate is available, that is, TerminationStatus is INFEASIBLE or DUAL_INFEASIBLE and the corresponding PrimalStatus or DualStatus is INFEASIBILITY_CERTIFICATE, then the first result must be a certificate. Other results may be alternate certificates, or infeasible points.

source
MathOptInterface.ObjectiveValueType
ObjectiveValue(result_index::Int = 1)

A model attribute for the objective value of the primal solution result_index.

If the solver does not have a primal value for the objective because the result_index is beyond the available solutions (whose number is indicated by the ResultCount attribute), getting this attribute must throw a ResultIndexBoundsError. Otherwise, if the result is unavailable for another reason (for instance, only a dual solution is available), the result is undefined. Users should first check PrimalStatus before accessing the ObjectiveValue attribute.

See ResultCount for information on how the results are ordered.

source
MathOptInterface.DualObjectiveValueType
DualObjectiveValue(result_index::Int = 1)

A model attribute for the value of the objective function of the dual problem for the result_indexth dual result.

If the solver does not have a dual value for the objective because the result_index is beyond the available solutions (whose number is indicated by the ResultCount attribute), getting this attribute must throw a ResultIndexBoundsError. Otherwise, if the result is unavailable for another reason (for instance, only a primal solution is available), the result is undefined. Users should first check DualStatus before accessing the DualObjectiveValue attribute.

See ResultCount for information on how the results are ordered.

source
MathOptInterface.ObjectiveBoundType
ObjectiveBound()

A model attribute for the best known bound on the optimal objective value.

source
MathOptInterface.RelativeGapType
RelativeGap()

A model attribute for the final relative optimality gap.

Warning

The definition of this gap is solver-dependent. However, most solvers implementing this attribute define the relative gap as some variation of $\frac{|b-f|}{|f|}$, where $b$ is the best bound and $f$ is the best feasible objective value.

source
MathOptInterface.SolveTimeSecType
SolveTimeSec()

A model attribute for the total elapsed solution time (in seconds) as reported by the optimizer.

source
MathOptInterface.SimplexIterationsType
SimplexIterations()

A model attribute for the cumulative number of simplex iterations during the optimization process.

For a mixed-integer program (MIP), the return value is the total simplex iterations for all nodes.

source
MathOptInterface.BarrierIterationsType
BarrierIterations()

A model attribute for the cumulative number of barrier iterations while solving a problem.

source
MathOptInterface.NodeCountType
NodeCount()

A model attribute for the total number of branch-and-bound nodes explored while solving a mixed-integer program (MIP).

source

ResultStatusCode

MathOptInterface.ResultStatusCodeType
ResultStatusCode

An Enum of possible values for the PrimalStatus and DualStatus attributes.

The values indicate how to interpret the result vector.

Values

Possible values are:

  • NO_SOLUTION: the result vector is empty.
  • FEASIBLE_POINT: the result vector is a feasible point.
  • NEARLY_FEASIBLE_POINT: the result vector is feasible if some constraint tolerances are relaxed.
  • INFEASIBLE_POINT: the result vector is an infeasible point.
  • INFEASIBILITY_CERTIFICATE: the result vector is an infeasibility certificate. If the PrimalStatus is INFEASIBILITY_CERTIFICATE, then the primal result vector is a certificate of dual infeasibility. If the DualStatus is INFEASIBILITY_CERTIFICATE, then the dual result vector is a proof of primal infeasibility.
  • NEARLY_INFEASIBILITY_CERTIFICATE: the result satisfies a relaxed criterion for a certificate of infeasibility.
  • REDUCTION_CERTIFICATE: the result vector is an ill-posed certificate; see this article for details. If the PrimalStatus is REDUCTION_CERTIFICATE, then the primal result vector is a proof that the dual problem is ill-posed. If the DualStatus is REDUCTION_CERTIFICATE, then the dual result vector is a proof that the primal is ill-posed.
  • NEARLY_REDUCTION_CERTIFICATE: the result satisfies a relaxed criterion for an ill-posed certificate.
  • UNKNOWN_RESULT_STATUS: the result vector contains a solution with an unknown interpretation.
  • OTHER_RESULT_STATUS: the result vector contains a solution with an interpretation not covered by one of the statuses defined above
source
MathOptInterface.NO_SOLUTIONConstant
NO_SOLUTION::ResultStatusCode

An instance of the ResultStatusCode enum.

NO_SOLUTION: the result vector is empty.

source
MathOptInterface.FEASIBLE_POINTConstant
FEASIBLE_POINT::ResultStatusCode

An instance of the ResultStatusCode enum.

FEASIBLE_POINT: the result vector is a feasible point.

source
MathOptInterface.NEARLY_FEASIBLE_POINTConstant
NEARLY_FEASIBLE_POINT::ResultStatusCode

An instance of the ResultStatusCode enum.

NEARLY_FEASIBLE_POINT: the result vector is feasible if some constraint tolerances are relaxed.

source
MathOptInterface.INFEASIBLE_POINTConstant
INFEASIBLE_POINT::ResultStatusCode

An instance of the ResultStatusCode enum.

INFEASIBLE_POINT: the result vector is an infeasible point.

source
MathOptInterface.INFEASIBILITY_CERTIFICATEConstant
INFEASIBILITY_CERTIFICATE::ResultStatusCode

An instance of the ResultStatusCode enum.

INFEASIBILITY_CERTIFICATE: the result vector is an infeasibility certificate. If the PrimalStatus is INFEASIBILITY_CERTIFICATE, then the primal result vector is a certificate of dual infeasibility. If the DualStatus is INFEASIBILITY_CERTIFICATE, then the dual result vector is a proof of primal infeasibility.

source
MathOptInterface.NEARLY_INFEASIBILITY_CERTIFICATEConstant
NEARLY_INFEASIBILITY_CERTIFICATE::ResultStatusCode

An instance of the ResultStatusCode enum.

NEARLY_INFEASIBILITY_CERTIFICATE: the result satisfies a relaxed criterion for a certificate of infeasibility.

source
MathOptInterface.REDUCTION_CERTIFICATEConstant
REDUCTION_CERTIFICATE::ResultStatusCode

An instance of the ResultStatusCode enum.

REDUCTION_CERTIFICATE: the result vector is an ill-posed certificate; see this article for details. If the PrimalStatus is REDUCTION_CERTIFICATE, then the primal result vector is a proof that the dual problem is ill-posed. If the DualStatus is REDUCTION_CERTIFICATE, then the dual result vector is a proof that the primal is ill-posed.

source
MathOptInterface.NEARLY_REDUCTION_CERTIFICATEConstant
NEARLY_REDUCTION_CERTIFICATE::ResultStatusCode

An instance of the ResultStatusCode enum.

NEARLY_REDUCTION_CERTIFICATE: the result satisfies a relaxed criterion for an ill-posed certificate.

source
MathOptInterface.UNKNOWN_RESULT_STATUSConstant
UNKNOWN_RESULT_STATUS::ResultStatusCode

An instance of the ResultStatusCode enum.

UNKNOWN_RESULT_STATUS: the result vector contains a solution with an unknown interpretation.

source
MathOptInterface.OTHER_RESULT_STATUSConstant
OTHER_RESULT_STATUS::ResultStatusCode

An instance of the ResultStatusCode enum.

OTHER_RESULT_STATUS: the result vector contains a solution with an interpretation not covered by one of the statuses defined above.

source

Conflict Status

MathOptInterface.compute_conflict!Function
compute_conflict!(optimizer::AbstractOptimizer)

Computes a minimal subset of constraints such that the model with the other constraints removed is still infeasible.

Some solvers call a set of conflicting constraints an Irreducible Inconsistent Subsystem (IIS).

See also ConflictStatus and ConstraintConflictStatus.

Note

If the model is modified after a call to compute_conflict!, the implementor is not obliged to purge the conflict. Any calls to the above attributes may return values for the original conflict without a warning. Similarly, when modifying the model, the conflict can be discarded.
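
A minimal sketch of the intended workflow (optimizer is assumed to hold an infeasible model in a conflict-capable solver, and ci to be one of its constraint indices):

    MOI.compute_conflict!(optimizer)
    if MOI.get(optimizer, MOI.ConflictStatus()) == MOI.CONFLICT_FOUND
        # query each constraint to see whether it belongs to the conflict (IIS)
        in_iis = MOI.get(optimizer, MOI.ConstraintConflictStatus(), ci) == MOI.IN_CONFLICT
    end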

source
MathOptInterface.ConflictStatusType
ConflictStatus()

A model attribute for the ConflictStatusCode explaining why the conflict refiner stopped when computing the conflict.

source
MathOptInterface.ConstraintConflictStatusType
ConstraintConflictStatus()

A constraint attribute indicating whether the constraint participates in the conflict. Its type is ConflictParticipationStatusCode.

source
MathOptInterface.ConflictStatusCodeType
ConflictStatusCode

An Enum of possible values for the ConflictStatus attribute. This attribute is meant to explain the reason why the conflict finder stopped executing in the most recent call to compute_conflict!.

Possible values are:

  • COMPUTE_CONFLICT_NOT_CALLED: the function compute_conflict! has not yet been called
  • NO_CONFLICT_EXISTS: there is no conflict because the problem is feasible
  • NO_CONFLICT_FOUND: the solver could not find a conflict
  • CONFLICT_FOUND: at least one conflict could be found
source
MathOptInterface.ConflictParticipationStatusCodeType
ConflictParticipationStatusCode

An Enum of possible values for the ConstraintConflictStatus attribute. This attribute is meant to indicate whether a given constraint participates or not in the last computed conflict.

Values

Possible values are:

  • NOT_IN_CONFLICT: the constraint does not participate in the conflict
  • IN_CONFLICT: the constraint participates in the conflict
  • MAYBE_IN_CONFLICT: the constraint may participate in the conflict; the solver was not able to prove that the constraint can be excluded from the conflict
source
MathOptInterface.NOT_IN_CONFLICTConstant
NOT_IN_CONFLICT::ConflictParticipationStatusCode

An instance of the ConflictParticipationStatusCode enum.

NOT_IN_CONFLICT: the constraint does not participate in the conflict

source
MathOptInterface.IN_CONFLICTConstant
IN_CONFLICT::ConflictParticipationStatusCode

An instance of the ConflictParticipationStatusCode enum.

IN_CONFLICT: the constraint participates in the conflict

source
MathOptInterface.MAYBE_IN_CONFLICTConstant
MAYBE_IN_CONFLICT::ConflictParticipationStatusCode

An instance of the ConflictParticipationStatusCode enum.

MAYBE_IN_CONFLICT: the constraint may participate in the conflict; the solver was not able to prove that the constraint can be excluded from the conflict

source
diff --git a/dev/moi/reference/modification/index.html b/dev/moi/reference/modification/index.html index 156ae6ea891..9ef41588ca9 100644 --- a/dev/moi/reference/modification/index.html +++ b/dev/moi/reference/modification/index.html @@ -97,4 +97,4 @@ )

A struct used to request a change in the quadratic coefficient of a ScalarQuadraticFunction.

Scaling factors

A ScalarQuadraticFunction has an implicit 0.5 scaling factor in front of the Q matrix. This modification applies to terms in the Q matrix.

If variable_1 == variable_2, this modification sets the corresponding diagonal element of the Q matrix to new_coefficient.

If variable_1 != variable_2, this modification is equivalent to setting both the corresponding upper- and lower-triangular elements of the Q matrix to new_coefficient.

As a consequence, the coefficient of the variable_1 * variable_2 term in the function becomes 0.5 * new_coefficient if variable_1 == variable_2, and new_coefficient if variable_1 != variable_2.
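
For illustration, a sketch assuming model has a ScalarQuadraticFunction{Float64} objective in the variables x and y and that the solver supports this modification:

    F = MOI.ScalarQuadraticFunction{Float64}
    change = MOI.ScalarQuadraticCoefficientChange(x, y, 2.0)
    MOI.modify(model, MOI.ObjectiveFunction{F}(), change)
    # because of the implicit 0.5 factor, the objective now contains the term
    # 2.0 * x * y (or 1.0 * x^2 if x == y)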

source
MathOptInterface.MultirowChangeType
MultirowChange{T}(
     variable::VariableIndex,
     new_coefficients::Vector{Tuple{Int64,T}},
+) where {T}

A struct used to request a change in the linear coefficients of a single variable in a vector-valued function.

New coefficients are specified by (output_index, coefficient) tuples.

Applicable to VectorAffineFunction and VectorQuadraticFunction.
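
For example, in this sketch ci is assumed to index a VectorAffineFunction-in-set constraint of model and x to be one of its variables:

    # set the coefficient of x to 2.0 in output row 1 and to 0.0 in output row 3
    MOI.modify(model, ci, MOI.MultirowChange(x, [(1, 2.0), (3, 0.0)]))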

source diff --git a/dev/moi/reference/nonlinear/index.html b/dev/moi/reference/nonlinear/index.html index fd45c70b8a3..0eec1527075 100644 --- a/dev/moi/reference/nonlinear/index.html +++ b/dev/moi/reference/nonlinear/index.html @@ -425,4 +425,4 @@ :(x[MOI.VariableIndex(1)] * x[MOI.VariableIndex(2)] * x[MOI.VariableIndex(3)] * x[MOI.VariableIndex(4)] >= 25.0) julia> MOI.constraint_expr(evaluator, 2) -:(x[MOI.VariableIndex(1)] ^ 2 + x[MOI.VariableIndex(2)] ^ 2 + x[MOI.VariableIndex(3)] ^ 2 + x[MOI.VariableIndex(4)] ^ 2 == 40.0)source +:(x[MOI.VariableIndex(1)] ^ 2 + x[MOI.VariableIndex(2)] ^ 2 + x[MOI.VariableIndex(3)] ^ 2 + x[MOI.VariableIndex(4)] ^ 2 == 40.0)source diff --git a/dev/moi/reference/standard_form/index.html b/dev/moi/reference/standard_form/index.html index 15c57a592e7..1a2f74c52ce 100644 --- a/dev/moi/reference/standard_form/index.html +++ b/dev/moi/reference/standard_form/index.html @@ -944,4 +944,4 @@ MOI.VectorOfVariables([t; vec(X)]), MOI.RootDetConeSquare(2), ) -MathOptInterface.ConstraintIndex{MathOptInterface.VectorOfVariables, MathOptInterface.RootDetConeSquare}(1)source +MathOptInterface.ConstraintIndex{MathOptInterface.VectorOfVariables, MathOptInterface.RootDetConeSquare}(1)source diff --git a/dev/moi/reference/variables/index.html b/dev/moi/reference/variables/index.html index 980cd639c7d..7b22882cb15 100644 --- a/dev/moi/reference/variables/index.html +++ b/dev/moi/reference/variables/index.html @@ -63,4 +63,4 @@ )::Bool

Return a Bool indicating whether model supports constraining a variable to belong to a set of type S either on creation of the variable with add_constrained_variable or after the variable is created with add_constraint.

By default, this function falls back to supports_add_constrained_variables(model, Reals) && supports_constraint(model, MOI.VariableIndex, S) which is the correct definition for most models.

Example

Suppose that a solver supports only two kinds of variables: binary variables and continuous variables with a lower bound. If the solver decides not to support VariableIndex-in-Binary and VariableIndex-in-GreaterThan constraints, it only has to implement add_constrained_variable for these two sets, which prevents the user from adding both a binary constraint and a lower bound on the same variable. Moreover, if the user adds a VariableIndex-in-GreaterThan constraint, implementing this interface (that is, supports_add_constrained_variables) enables the constraint to be transparently bridged into a supported constraint.
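
From the user's side, the query is typically paired with add_constrained_variable, as in this sketch:

    if MOI.supports_add_constrained_variable(model, MOI.GreaterThan{Float64})
        # returns both the new variable and the index of its bound constraint
        x, ci = MOI.add_constrained_variable(model, MOI.GreaterThan(0.0))
    end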

source
MathOptInterface.supports_add_constrained_variablesFunction
supports_add_constrained_variables(
     model::ModelLike,
     S::Type{<:AbstractVectorSet}
-)::Bool

Return a Bool indicating whether model supports constraining a vector of variables to belong to a set of type S either on creation of the vector of variables with add_constrained_variables or after the variable is created with add_constraint.

By default, if S is Reals then this function returns true and otherwise, it falls back to supports_add_constrained_variables(model, Reals) && supports_constraint(model, MOI.VectorOfVariables, S) which is the correct definition for most models.

Example

In the standard conic form (see Duality), the variables are grouped into several cones and the constraints are affine equality constraints. If Reals is not one of the cones supported by the solver, then it needs to implement supports_add_constrained_variables(::Optimizer, ::Type{Reals}) = false because free variables are not supported. The solver should then implement supports_add_constrained_variables(::Optimizer, ::Type{<:SupportedCones}) = true, where SupportedCones is the union of all supported cone types; it does not have to implement the method supports_constraint(::Type{VectorOfVariables}, Type{<:SupportedCones}) because that method returns false by default. This prevents the user from constraining the same variable in two different cones. When a VectorOfVariables-in-S is added, the variables of the vector have already been created, so they already belong to the given cones. If bridges are enabled, the constraint will therefore be bridged by adding slack variables in S and equality constraints ensuring that the slack variables are equal to the corresponding variables of the given constraint function.

Note that there may also be sets for which !supports_add_constrained_variables(model, S) and supports_constraint(model, MOI.VectorOfVariables, S). For instance, suppose a solver supports positive semidefinite variable constraints and two types of variables: binary variables and nonnegative variables. Then the solver should support adding VectorOfVariables-in-PositiveSemidefiniteConeTriangle constraints, but it should not support creating variables constrained to belong to the PositiveSemidefiniteConeTriangle because the variables in PositiveSemidefiniteConeTriangle should first be created as either binary or non-negative.
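
Analogously to the scalar case, a user-side sketch:

    if MOI.supports_add_constrained_variables(model, MOI.Nonnegatives)
        # returns the vector of new variables and the index of the VectorOfVariables constraint
        x, ci = MOI.add_constrained_variables(model, MOI.Nonnegatives(3))
    end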

source
MathOptInterface.is_validMethod
is_valid(model::ModelLike, index::Index)::Bool

Return a Bool indicating whether this index refers to a valid object in the model model.

source
MathOptInterface.deleteMethod
delete(model::ModelLike, index::Index)

Delete the referenced object from the model. Throw DeleteNotAllowed if index cannot be deleted.

The following modifications also take effect if Index is VariableIndex:

  • If index is used in the objective function, it is removed from the function; that is, it is replaced by zero.
  • For each func-in-set constraint of the model:
    • If func isa VariableIndex and func == index, then the constraint is deleted.
    • If func isa VectorOfVariables and index in func.variables, then
      • if length(func.variables) == 1, the constraint is deleted;
      • if length(func.variables) > 1 and supports_dimension_update(set), then the variable is removed from func and set is replaced by update_dimension(set, MOI.dimension(set) - 1);
      • otherwise, a DeleteNotAllowed error is thrown.
    • Otherwise, the variable is removed from func; that is, it is replaced by zero.
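
A self-contained sketch of the basic behavior, using the Utilities.Model cache purely for illustration:

    import MathOptInterface as MOI
    model = MOI.Utilities.Model{Float64}()
    x = MOI.add_variable(model)
    MOI.is_valid(model, x)  # true
    MOI.delete(model, x)
    MOI.is_valid(model, x)  # false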
source
MathOptInterface.deleteMethod
delete(model::ModelLike, indices::Vector{R<:Index}) where {R}

Delete the referenced objects in the vector indices from the model. It may be assumed that R is a concrete type. The default fallback sequentially deletes the individual items in indices, although specialized implementations may be more efficient.

source

Attributes

MathOptInterface.AbstractVariableAttributeType
AbstractVariableAttribute

Abstract supertype for attribute objects that can be used to set or get attributes (properties) of variables in the model.

source
MathOptInterface.VariableNameType
VariableName()

A variable attribute for a string identifying the variable. It is valid for two variables to have the same name; however, variables with duplicate names cannot be looked up using get. It has a default value of "" if not set.
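
For example (x is assumed to be a VariableIndex of model):

    MOI.set(model, MOI.VariableName(), x, "x1")
    MOI.get(model, MOI.VariableName(), x)  # returns "x1"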

source
MathOptInterface.VariablePrimalStartType
VariablePrimalStart()

A variable attribute for the initial assignment to some primal variable's value that the optimizer may use to warm-start the solve. May be a number or nothing (unset).
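
For example, a sketch in which x is a VariableIndex of a model whose solver supports warm starts:

    MOI.set(model, MOI.VariablePrimalStart(), x, 1.5)      # provide a starting value
    MOI.set(model, MOI.VariablePrimalStart(), x, nothing)  # unset it again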

source
MathOptInterface.VariablePrimalType
VariablePrimal(result_index::Int = 1)

A variable attribute for the assignment to some primal variable's value in result result_index. If result_index is omitted, it is 1 by default.

If the solver does not have a primal value for the variable because the result_index is beyond the available solutions (whose number is indicated by the ResultCount attribute), getting this attribute must throw a ResultIndexBoundsError. Otherwise, if the result is unavailable for another reason (for instance, only a dual solution is available), the result is undefined. Users should first check PrimalStatus before accessing the VariablePrimal attribute.

See ResultCount for information on how the results are ordered.

source
MathOptInterface.VariableBasisStatusType
VariableBasisStatus(result_index::Int = 1)

A variable attribute for the BasisStatusCode of a variable in result result_index, with respect to an available optimal solution basis.

If the solver does not have a basis status for the variable because the result_index is beyond the available solutions (whose number is indicated by the ResultCount attribute), getting this attribute must throw a ResultIndexBoundsError. Otherwise, if the result is unavailable for another reason (for instance, only a dual solution is available), the result is undefined. Users should first check PrimalStatus before accessing the VariableBasisStatus attribute.

See ResultCount for information on how the results are ordered.

source
diff --git a/dev/moi/release_notes/index.html b/dev/moi/release_notes/index.html index c8cfdcd3ff7..799cf13efa8 100644 --- a/dev/moi/release_notes/index.html +++ b/dev/moi/release_notes/index.html @@ -31,4 +31,4 @@ end write(path, s) end -end

v0.9.22 (May 22, 2021)

This release contains backports from the ongoing development of the v0.10 release.

v0.9.21 (April 23, 2021)

v0.9.20 (February 20, 2021)

v0.9.19 (December 1, 2020)

v0.9.18 (November 3, 2020)

v0.9.17 (September 21, 2020)

v0.9.16 (September 17, 2020)

v0.9.15 (September 14, 2020)

v0.9.14 (May 30, 2020)

v0.9.13 (March 24, 2020)

v0.9.12 (February 28, 2020)

v0.9.11 (February 21, 2020)

v0.9.10 (January 31, 2020)

v0.9.9 (December 29, 2019)

v0.9.8 (December 19, 2019)

v0.9.7 (October 30, 2019)

v0.9.6 (October 25, 2019)

v0.9.5 (October 9, 2019)

v0.9.4 (October 2, 2019)

v0.9.3 (September 20, 2019)

v0.9.2 (September 5, 2019)

v0.9.1 (August 22, 2019)

v0.9.0 (August 13, 2019)

v0.8.4 (March 13, 2019)

v0.8.3 (March 6, 2019)

v0.8.2 (February 7, 2019)

v0.8.1 (January 7, 2019)

v0.8.0 (December 18, 2018)

v0.7.0 (December 13, 2018)

v0.6.4 (November 27, 2018)

v0.6.3 (November 16, 2018)

v0.6.2 (October 26, 2018)

v0.6.1 (September 22, 2018)

v0.6.0 (August 30, 2018)

v0.5.0 (August 5, 2018)

v0.4.1 (June 28, 2018)

v0.4.0 (June 23, 2018)

v0.3.0 (May 25, 2018)

v0.2.0 (April 24, 2018)

v0.1.0 (February 28, 2018)

diff --git a/dev/moi/submodules/Benchmarks/overview/index.html b/dev/moi/submodules/Benchmarks/overview/index.html index 624e0422cf9..1d2e6d300f1 100644 --- a/dev/moi/submodules/Benchmarks/overview/index.html +++ b/dev/moi/submodules/Benchmarks/overview/index.html @@ -21,4 +21,4 @@ MOI.Benchmarks.compare_against_baseline( suite, "current"; directory = "/tmp", verbose = true -)

This comparison will create a report detailing improvements and regressions.

diff --git a/dev/moi/submodules/Benchmarks/reference/index.html b/dev/moi/submodules/Benchmarks/reference/index.html index 48afd3383ac..aa9039f5b4e 100644 --- a/dev/moi/submodules/Benchmarks/reference/index.html +++ b/dev/moi/submodules/Benchmarks/reference/index.html @@ -37,4 +37,4 @@ "glpk_master"; directory = "/tmp", verbose = true, - )source + )source diff --git a/dev/moi/submodules/Bridges/implementation/index.html b/dev/moi/submodules/Bridges/implementation/index.html index a2692b365be..3bd15138e1a 100644 --- a/dev/moi/submodules/Bridges/implementation/index.html +++ b/dev/moi/submodules/Bridges/implementation/index.html @@ -33,4 +33,4 @@ Subject to: ScalarAffineFunction{Int64}-in-LessThan{Int64} - (0) - (1) x <= (-1) + (0) - (1) x <= (-1) diff --git a/dev/moi/submodules/Bridges/list_of_bridges/index.html b/dev/moi/submodules/Bridges/list_of_bridges/index.html index 492a73c387a..1d33683c20b 100644 --- a/dev/moi/submodules/Bridges/list_of_bridges/index.html +++ b/dev/moi/submodules/Bridges/list_of_bridges/index.html @@ -129,4 +129,4 @@ & & & x_{11} & x_{12} & x_{13} \\ & & & & x_{22} & x_{23} \\ & & & & & x_{33} -\end{bmatrix}\]

is positive semidefinite.

The bridge achieves this reformulation by adding a new set of variables in MOI.PositiveSemidefiniteConeTriangle(6), and then adding three groups of equality constraints to:

source
MathOptInterface.Bridges.Variable.NonposToNonnegBridgeType
NonposToNonnegBridge{T} <: Bridges.Variable.AbstractBridge

NonposToNonnegBridge implements the following reformulation:

  • $x \in \mathbb{R}_-$ into $y \in \mathbb{R}_+$ with the substitution rule $x = -y$,

where T is the coefficient type of -y.

Source node

NonposToNonnegBridge supports:

Target nodes

NonposToNonnegBridge creates:

source
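
In practice, a user rarely constructs NonposToNonnegBridge (or the other variable bridges below) directly; wrapping an optimizer applies them automatically when needed. A sketch, assuming inner is any MOI optimizer:

    bridged = MOI.Bridges.full_bridge_optimizer(inner, Float64)
    # variables constrained to MOI.Nonpositives in `bridged` are transparently
    # reformulated into MOI.Nonnegatives variables in `inner`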
MathOptInterface.Bridges.Variable.ParameterToEqualToBridgeType
ParameterToEqualToBridge{T} <: Bridges.Variable.AbstractBridge

ParameterToEqualToBridge implements the following reformulation:

  • $x \in Parameter(v)$ into $x == v$

Source node

ParameterToEqualToBridge supports:

Target nodes

ParameterToEqualToBridge creates:

source
MathOptInterface.Bridges.Variable.RSOCtoPSDBridgeType
RSOCtoPSDBridge{T} <: Bridges.Variable.AbstractBridge

RSOCtoPSDBridge implements the following reformulation:

  • $||x||_2^2 \le 2tu$ where $t, u \ge 0$ into $Y \succeq 0$, with the substitution rule: $Y = \left[\begin{array}{c c}t & x^\top \\ x & 2u \mathbf{I}\end{array}\right].$

Additional bounds are added to ensure the off-diagonals of the $2uI$ submatrix are 0, and linear constraints are added to ensure the diagonal of $2uI$ takes the same values.

As a special case, if $|x| = 0$, then RSOCtoPSDBridge reformulates into $(t, u) \in \mathbb{R}_+$.

Source node

RSOCtoPSDBridge supports:

Target nodes

RSOCtoPSDBridge creates:

source
MathOptInterface.Bridges.Variable.RSOCtoSOCBridgeType
RSOCtoSOCBridge{T} <: Bridges.Variable.AbstractBridge

RSOCtoSOCBridge implements the following reformulation:

  • $||x||_2^2 \le 2tu$ into $||v||_2 \le w$, with the substitution rules $t = \frac{w}{\sqrt 2} + \frac{v_1}{\sqrt 2}$, $u = \frac{w}{\sqrt 2} - \frac{v_1}{\sqrt 2}$, and $x = (v_2,\ldots,v_N)$.

Source node

RSOCtoSOCBridge supports:

Target node

RSOCtoSOCBridge creates:

source
MathOptInterface.Bridges.Variable.SOCtoRSOCBridgeType
SOCtoRSOCBridge{T} <: Bridges.Variable.AbstractBridge

SOCtoRSOCBridge implements the following reformulation:

  • $||x||_2 \le t$ into $2uv \ge ||w||_2^2$, with the substitution rules $t = \frac{u}{\sqrt 2} + \frac{v}{\sqrt 2}$, $x = (\frac{u}{\sqrt 2} - \frac{v}{\sqrt 2}, w)$.

Assumptions

  • SOCtoRSOCBridge assumes that $|x| \ge 1$.

Source node

SOCtoRSOCBridge supports:

Target node

SOCtoRSOCBridge creates:

source
MathOptInterface.Bridges.Variable.SetMapBridgeType
abstract type SetMapBridge{T,S1,S2} <: AbstractBridge end

Consider two types of sets, S1 and S2, and a linear mapping A such that the image of a set of type S1 under A is a set of type S2.

A SetMapBridge{T,S1,S2} is a bridge that substitutes constrained variables in S2 into the image through A of constrained variables in S1.

The linear map A is described by:

Implementing a method for these two functions is sufficient to bridge constrained variables. However, in order for the getters and setters of attributes such as dual solutions and starting values to work as well, a method for the following functions must be implemented:

See the docstrings of each function to see which feature would be missing if it was not implemented for a given bridge.

source
MathOptInterface.Bridges.Variable.VectorizeBridgeType
VectorizeBridge{T,S} <: Bridges.Variable.AbstractBridge

VectorizeBridge implements the following reformulations:

  • $x \ge a$ into $[y] \in \mathbb{R}_+$ with the substitution rule $x = a + y$
  • $x \le a$ into $[y] \in \mathbb{R}_-$ with the substitution rule $x = a + y$
  • $x == a$ into $[y] \in \{0\}$ with the substitution rule $x = a + y$

where T is the coefficient type of a + y.

Source node

VectorizeBridge supports:

Target nodes

VectorizeBridge creates:

source
MathOptInterface.Bridges.Variable.ZerosBridgeType
ZerosBridge{T} <: Bridges.Variable.AbstractBridge

ZerosBridge implements the following reformulation:

  • $x \in \{0\}$ into the substitution rule $x = 0$,

where T is the coefficient type of 0.

Source node

ZerosBridge supports:

Target nodes

ZerosBridge does not create target nodes. It replaces all instances of x with 0 via substitution. This means that no variables are created in the underlying model.

Caveats

The bridged variables are similar to parameters with zero values. Parameters with non-zero values can be created with constrained variables in MOI.EqualTo by combining a VectorizeBridge and this bridge.

However, functions modified by ZerosBridge cannot be unbridged. That is, for a given function, we cannot determine if the bridged variables were used.

A related implication is that this bridge does not support MOI.ConstraintDual. However, if a MOI.Utilities.CachingOptimizer is used, the dual can be determined by the bridged optimizer using MOI.Utilities.get_fallback because the caching optimizer records the unbridged function.

source
diff --git a/dev/moi/submodules/Bridges/overview/index.html b/dev/moi/submodules/Bridges/overview/index.html index 8ae9e0e332f..9c6df9aec6b 100644 --- a/dev/moi/submodules/Bridges/overview/index.html +++ b/dev/moi/submodules/Bridges/overview/index.html @@ -66,4 +66,4 @@ julia> MOI.get(inner_optimizer, MOI.ListOfConstraintTypesPresent()) 1-element Vector{Tuple{Type, Type}}: - (MathOptInterface.VariableIndex, MathOptInterface.Interval{Float64}) + (MathOptInterface.VariableIndex, MathOptInterface.Interval{Float64}) diff --git a/dev/moi/submodules/Bridges/reference/index.html b/dev/moi/submodules/Bridges/reference/index.html index 6eeac75db05..d1101af0fc5 100644 --- a/dev/moi/submodules/Bridges/reference/index.html +++ b/dev/moi/submodules/Bridges/reference/index.html @@ -221,4 +221,4 @@ cost::Int, )

As an alternative to variable_node, add a virtual edge to graph that represents adding a free variable, followed by a constraint of type constraint_node, with bridging cost cost.

Why is this needed?

Variables can either be added as a variable constrained on creation, or as a free variable which then has a constraint added to it.

source
MathOptInterface.Bridges.bridge_indexFunction
bridge_index(graph::Graph, node::VariableNode)::Int
 bridge_index(graph::Graph, node::ConstraintNode)::Int
-bridge_index(graph::Graph, node::ObjectiveNode)::Int

Return the optimal index of the bridge to choose from node.

source
MathOptInterface.Bridges.is_variable_edge_bestFunction
is_variable_edge_best(graph::Graph, node::VariableNode)::Bool

Return a Bool indicating whether node should be added as a variable constrained on creation, or as a free variable followed by a constraint.

source
diff --git a/dev/moi/submodules/FileFormats/overview/index.html b/dev/moi/submodules/FileFormats/overview/index.html index 90cfa3a7f27..640bde6a894 100644 --- a/dev/moi/submodules/FileFormats/overview/index.html +++ b/dev/moi/submodules/FileFormats/overview/index.html @@ -158,4 +158,4 @@ path: [variables][1] instance: Dict{String, Any}("NaMe" => "x") schema key: required -schema value: Any["name"] +schema value: Any["name"] diff --git a/dev/moi/submodules/FileFormats/reference/index.html b/dev/moi/submodules/FileFormats/reference/index.html index c7cfd2610d6..8644cf69aed 100644 --- a/dev/moi/submodules/FileFormats/reference/index.html +++ b/dev/moi/submodules/FileFormats/reference/index.html @@ -26,4 +26,4 @@ )

Parse the .sol file filename created by solving model and return a SolFileResults struct.

The returned struct supports the MOI.get API for querying result attributes such as MOI.TerminationStatus, MOI.VariablePrimal, and MOI.ConstraintDual.

source
SolFileResults(
     raw_status::String,
     termination_status::MOI.TerminationStatusCode,
+)

Return a SolFileResults struct with MOI.RawStatusString set to raw_status, MOI.TerminationStatus set to termination_status, and MOI.PrimalStatus and MOI.DualStatus set to NO_SOLUTION.

All other attributes are unset.

source diff --git a/dev/moi/submodules/Nonlinear/overview/index.html b/dev/moi/submodules/Nonlinear/overview/index.html index 4d7c0301f45..ea20dc92428 100644 --- a/dev/moi/submodules/Nonlinear/overview/index.html +++ b/dev/moi/submodules/Nonlinear/overview/index.html @@ -184,4 +184,4 @@ Node(NODE_VARIABLE, 1, 1), ], [2.0], - );

The ordering of the nodes in the tape must satisfy two rules:

Design goals

This is less readable than the other options, but does this data structure meet our design goals?

Instead of a heap-allocated object for each node, we only have two Vectors for each expression, nodes and values, as well as two constant vectors for the OPERATORS. In addition, all fields are concretely typed, and there are no Union or Any types.

For our third goal, it is not easy to identify the children of a node, but it is easy to identify the parent of any node. Therefore, we can use Nonlinear.adjacency_matrix to compute a sparse matrix that maps parents to their children.

The design in practice

In practice, Node and Expression are exactly Nonlinear.Node and Nonlinear.Expression. However, Nonlinear.NodeType has more fields to account for comparison operators such as :>= and :<=, logic operators such as :&& and :||, nonlinear parameters, and nested subexpressions.

Moreover, instead of storing the operators as global constants, they are stored in Nonlinear.OperatorRegistry, and it also stores a vector of logic operators and a vector of comparison operators. In addition to Nonlinear.DEFAULT_UNIVARIATE_OPERATORS and Nonlinear.DEFAULT_MULTIVARIATE_OPERATORS, you can register user-defined functions using Nonlinear.register_operator.

Nonlinear.Model is a struct that stores the Nonlinear.OperatorRegistry, as well as a list of parameters and subexpressions in the model.

ReverseAD

Nonlinear.ReverseAD is a submodule for computing derivatives of a nonlinear optimization problem using sparse reverse-mode automatic differentiation (AD).

This section does not attempt to explain how sparse reverse-mode AD works, but instead explains why MOI contains its own implementation, and highlights notable differences from similar packages.

Warning

Don't use the API in ReverseAD to compute derivatives. Instead, create a Nonlinear.Evaluator object with Nonlinear.SparseReverseMode as the backend, and then query the MOI API methods.
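
A minimal sketch of that recommended route (the objective x^2 + 1 is an arbitrary example):

    import MathOptInterface as MOI
    model = MOI.Nonlinear.Model()
    x = MOI.VariableIndex(1)
    MOI.Nonlinear.set_objective(model, :($x^2 + 1.0))
    evaluator = MOI.Nonlinear.Evaluator(model, MOI.Nonlinear.SparseReverseMode(), [x])
    MOI.initialize(evaluator, [:Grad])
    grad = [NaN]
    MOI.eval_objective_gradient(evaluator, grad, [2.0])  # grad is now [4.0]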

Design goals

The JuliaDiff organization maintains a list of packages for doing AD in Julia. At last count, there were at least ten packages (not including ReverseAD) for reverse-mode AD in Julia. ReverseAD exists because it has a different set of design goals.

History

ReverseAD started life as ReverseDiffSparse.jl, development of which began in early 2014(!). This was well before the other AD packages started development. Because we had a well-tested, working AD in JuMP, there was less motivation to contribute to and explore other AD packages. The lack of historical interaction also meant that other packages were not optimized for the types of problems that JuMP is built for (that is, large-scale sparse problems). When we first created MathOptInterface, we kept the AD in JuMP to simplify the transition, and postponed the development of a first-class nonlinear interface in MathOptInterface.

Prior to the introduction of Nonlinear, JuMP's nonlinear implementation was a confusing mix of functions and types spread across the code base and in the private _Derivatives submodule. This made it hard to swap the AD system for another. The main motivation for refactoring JuMP to create the Nonlinear submodule in MathOptInterface was to abstract the interface between JuMP and the AD system, allowing us to swap in and test new AD systems in the future.

diff --git a/dev/moi/submodules/Nonlinear/reference/index.html b/dev/moi/submodules/Nonlinear/reference/index.html index 4c68527c8cd..f4069924af9 100644 --- a/dev/moi/submodules/Nonlinear/reference/index.html +++ b/dev/moi/submodules/Nonlinear/reference/index.html @@ -236,4 +236,4 @@ julia> MOI.initialize(evaluator, Symbol[]) julia> MOI.Nonlinear.ordinal_index(evaluator, c2) # Returns 1 -1source +1source diff --git a/dev/moi/submodules/Test/overview/index.html b/dev/moi/submodules/Test/overview/index.html index 8108d542b07..a69451be121 100644 --- a/dev/moi/submodules/Test/overview/index.html +++ b/dev/moi/submodules/Test/overview/index.html @@ -167,4 +167,4 @@ ), ) return -end

Finally, you also need to implement Test.version_added. If we added this test when the latest released version of MOI was v0.10.5, define:

version_added(::typeof(test_unit_optimize!_twice)) = v"0.10.6"

Step 6

Commit the changes to git from ~/.julia/dev/MathOptInterface and submit the PR for review.

Tip

If you need help writing a test, open an issue on GitHub, or ask the Developer Chatroom.

diff --git a/dev/moi/submodules/Test/reference/index.html b/dev/moi/submodules/Test/reference/index.html index 8f06aa17dbd..b15456dbfd7 100644 --- a/dev/moi/submodules/Test/reference/index.html +++ b/dev/moi/submodules/Test/reference/index.html @@ -63,4 +63,4 @@ \text{subject to}\ & x_1 * x_2 * x_3 * x_4 \ge 25 \\ & x_1^2 + x_2^2 + x_3^2 + x_4^2 = 40 \\ & 1 \le x_1, x_2, x_3, x_4 \le 5 -\end{aligned}\]


The optimal solution is [1.000, 4.743, 3.821, 1.379].

source diff --git a/dev/moi/submodules/Utilities/overview/index.html b/dev/moi/submodules/Utilities/overview/index.html index 243e61b764e..b82a63fb7c1 100644 --- a/dev/moi/submodules/Utilities/overview/index.html +++ b/dev/moi/submodules/Utilities/overview/index.html @@ -378,4 +378,4 @@ index_map = MOI.copy_to(dest, src) for (F, S) in MOI.get(src, MOI.ListOfConstraintTypesPresent()) function_barrier(dest, src, index_map[F, S]) -end +end diff --git a/dev/moi/submodules/Utilities/reference/index.html b/dev/moi/submodules/Utilities/reference/index.html index afc69f92c9e..774ed6f03ca 100644 --- a/dev/moi/submodules/Utilities/reference/index.html +++ b/dev/moi/submodules/Utilities/reference/index.html @@ -91,7 +91,7 @@ typeof(CleverDicts.key_to_index), typeof(CleverDicts.index_to_key), } -end

A struct storing F-in-S constraints as a mapping from the constraint indices to the corresponding tuple of function and set.

source
MathOptInterface.Utilities.StructOfConstraintsType
abstract type StructOfConstraints <: MOI.ModelLike end

A struct that stores, in its subfields, other structs storing constraints of different types.

See Utilities.@struct_of_constraints_by_function_types and Utilities.@struct_of_constraints_by_set_types.

source
MathOptInterface.Utilities.@struct_of_constraints_by_function_typesMacro
Utilities.@struct_of_constraints_by_function_types(name, func_types...)

Given a vector of n function types (F1, F2,..., Fn) in func_types, defines a subtype of StructOfConstraints named name with type parameters {T, C1, C2, ..., Cn}. It contains n fields, where the ith field has type Ci and stores the constraints of function type Fi.

The expression Fi can also be a union in which case any constraint for which the function type is in the union is stored in the field with type Ci.

source
MathOptInterface.Utilities.@struct_of_constraints_by_set_typesMacro
Utilities.@struct_of_constraints_by_set_types(name, func_types...)

Given a vector of n set types (S1, S2,..., Sn) in func_types, defines a subtype of StructOfConstraints named name with type parameters {T, C1, C2, ..., Cn}. It contains n fields, where the ith field has type Ci and stores the constraints of set type Si. The expression Si can also be a union, in which case any constraint for which the set type is in the union is stored in the field with type Ci. This can be useful if Ci is a MatrixOfConstraints, in order to concatenate the coefficients of constraints of several different set types in the same matrix.

source
MathOptInterface.Utilities.struct_of_constraint_codeFunction
struct_of_constraint_code(struct_name, types, field_types = nothing)

Given a vector of n Union{SymbolFun,_UnionSymbolFS{SymbolFun}} or Union{SymbolSet,_UnionSymbolFS{SymbolSet}} in types, defines a subtype of StructOfConstraints named struct_name with type parameters {T, F1, F2, ..., Fn} if field_types is nothing, and {T} otherwise. It contains n fields, where the ith field has type Ci if field_types is nothing and type field_types[i] otherwise. If types is a vector of Union{SymbolFun,_UnionSymbolFS{SymbolFun}} (resp. Union{SymbolSet,_UnionSymbolFS{SymbolSet}}), then the constraints of that function (resp. set) type are stored in the corresponding field.

This function is used by the macros @model, @struct_of_constraints_by_function_types and @struct_of_constraints_by_set_types.

source

Caching optimizer

MathOptInterface.Utilities.CachingOptimizerType
CachingOptimizer

CachingOptimizer is an intermediate layer that stores a cache of the model and links it with an optimizer. It supports incremental model construction and modification even when the optimizer doesn't.

Constructors

    CachingOptimizer(cache::MOI.ModelLike, optimizer::AbstractOptimizer)

Creates a CachingOptimizer in AUTOMATIC mode, with the optimizer optimizer.

The type of the optimizer returned is CachingOptimizer{typeof(optimizer), typeof(cache)} so it does not support the function reset_optimizer(::CachingOptimizer, new_optimizer) if the type of new_optimizer is different from the type of optimizer.

    CachingOptimizer(cache::MOI.ModelLike, mode::CachingOptimizerMode)

Creates a CachingOptimizer in the NO_OPTIMIZER state and mode mode.

The type of the optimizer returned is CachingOptimizer{MOI.AbstractOptimizer,typeof(cache)} so it does support the function reset_optimizer(::CachingOptimizer, new_optimizer) if the type of new_optimizer is different from the type of optimizer.

About the type

States

A CachingOptimizer may be in one of three possible states (CachingOptimizerState):

  • NO_OPTIMIZER: The CachingOptimizer does not have any optimizer.
  • EMPTY_OPTIMIZER: The CachingOptimizer has an empty optimizer. The optimizer is not synchronized with the cached model.
  • ATTACHED_OPTIMIZER: The CachingOptimizer has an optimizer, and it is synchronized with the cached model.

Modes

A CachingOptimizer has two modes of operation (CachingOptimizerMode):

  • MANUAL: The only methods that change the state of the CachingOptimizer are Utilities.reset_optimizer, Utilities.drop_optimizer, and Utilities.attach_optimizer. Attempting to perform an operation in the incorrect state results in an error.
  • AUTOMATIC: The CachingOptimizer changes its state when necessary. For example, optimize! will automatically call attach_optimizer (an optimizer must have been previously set). Attempting to add a constraint or perform a modification not supported by the optimizer results in a drop to the EMPTY_OPTIMIZER state.
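
A construction sketch using a MockOptimizer as the inner solver (any MOI.AbstractOptimizer works the same way):

    import MathOptInterface as MOI
    cache = MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}())
    inner = MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}())
    model = MOI.Utilities.CachingOptimizer(cache, inner)
    MOI.Utilities.state(model)  # EMPTY_OPTIMIZER
    MOI.Utilities.mode(model)   # AUTOMATIC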
source
MathOptInterface.Utilities.attach_optimizerFunction
attach_optimizer(model::CachingOptimizer)

Attaches the optimizer to model, copying all model data into it. Can be called only from the EMPTY_OPTIMIZER state. If the copy succeeds, the CachingOptimizer will be in state ATTACHED_OPTIMIZER after the call, otherwise an error is thrown; see MOI.copy_to for more details on which errors can be thrown.

source
MOIU.attach_optimizer(model::GenericModel)

Call MOIU.attach_optimizer on the backend of model.

Cannot be called in direct mode.

source
MathOptInterface.Utilities.reset_optimizerFunction
reset_optimizer(m::CachingOptimizer, optimizer::MOI.AbstractOptimizer)

Sets or resets m to have the given empty optimizer optimizer.

Can be called from any state. An assertion error will be thrown if optimizer is not empty.

The CachingOptimizer m will be in state EMPTY_OPTIMIZER after the call.

source
reset_optimizer(m::CachingOptimizer)

Detaches and empties the current optimizer. Can be called from ATTACHED_OPTIMIZER or EMPTY_OPTIMIZER state. The CachingOptimizer will be in state EMPTY_OPTIMIZER after the call.

source
MOIU.reset_optimizer(model::GenericModel, optimizer::MOI.AbstractOptimizer)

Call MOIU.reset_optimizer on the backend of model.

Cannot be called in direct mode.

source
MOIU.reset_optimizer(model::GenericModel)

Call MOIU.reset_optimizer on the backend of model.

Cannot be called in direct mode.

source
MathOptInterface.Utilities.drop_optimizerFunction
drop_optimizer(m::CachingOptimizer)

Drops the optimizer, if one is present. Can be called from any state. The CachingOptimizer will be in state NO_OPTIMIZER after the call.

source
MOIU.drop_optimizer(model::GenericModel)

Call MOIU.drop_optimizer on the backend of model.

Cannot be called in direct mode.

source
MathOptInterface.Utilities.stateFunction
state(m::CachingOptimizer)::CachingOptimizerState

Returns the state of the CachingOptimizer m. See Utilities.CachingOptimizer.

source
MathOptInterface.Utilities.modeFunction
mode(m::CachingOptimizer)::CachingOptimizerMode

Returns the operating mode of the CachingOptimizer m. See Utilities.CachingOptimizer.

source

Mock optimizer

MathOptInterface.Utilities.MockOptimizerType
MockOptimizer

MockOptimizer is a fake optimizer especially useful for testing. Its main feature is that it can store the values that should be returned for each attribute.
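
A sketch of the intended usage, storing the attribute value that a later get should return:

    mock = MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}())
    MOI.set(mock, MOI.TerminationStatus(), MOI.OPTIMAL)
    MOI.get(mock, MOI.TerminationStatus())  # returns OPTIMAL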

source

Printing

MathOptInterface.Utilities.latex_formulationFunction
latex_formulation(model::MOI.ModelLike; kwargs...)

Wrap model in a type so that it can be pretty-printed as text/latex in a notebook like IJulia, or in Documenter.

To render the model, end the cell with latex_formulation(model), or call display(latex_formulation(model)) to force the display of the model from inside a function.

Possible keyword arguments are:

  • simplify_coefficients : Simplify coefficients if possible by omitting them or removing trailing zeros.
  • default_name : The name given to variables with an empty name.
  • print_types : Print the MOI type of each function and set for clarity.
source

Copy utilities

MathOptInterface.Utilities.default_copy_toFunction
default_copy_to(dest::MOI.ModelLike, src::MOI.ModelLike)

A default implementation of MOI.copy_to(dest, src) for models that implement the incremental interface, that is, MOI.supports_incremental_interface returns true.

source
MathOptInterface.Utilities.IndexMapType
IndexMap()

The dictionary-like object returned by MOI.copy_to.
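
A sketch showing how the returned map translates indices from src into dest:

    import MathOptInterface as MOI
    src = MOI.Utilities.Model{Float64}()
    x = MOI.add_variable(src)
    dest = MOI.Utilities.Model{Float64}()
    index_map = MOI.copy_to(dest, src)
    index_map[x]  # the VariableIndex corresponding to `x` in `dest`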

source
MathOptInterface.Utilities.identity_index_mapFunction
identity_index_map(model::MOI.ModelLike)

Return an IndexMap that maps all variable and constraint indices of model to themselves.

source
MathOptInterface.Utilities.ModelFilterType
ModelFilter(filter::Function, model::MOI.ModelLike)

A layer to filter out various components of model.

The filter function takes a single argument, which is each element from the list returned by the attributes below. It returns true if the element should be visible in the filtered model and false otherwise.

The components that are filtered are:

  • Entire constraint types via:
    • MOI.ListOfConstraintTypesPresent
  • Individual constraints via:
    • MOI.ListOfConstraintIndices{F,S}
  • Specific attributes via:
    • MOI.ListOfModelAttributesSet
    • MOI.ListOfConstraintAttributesSet
    • MOI.ListOfVariableAttributesSet
Warning

The list of attributes filtered may change in a future release. You should write functions that are generic and not limited to the five types listed above. Thus, you should probably define a fallback filter(::Any) = true.

See below for examples of how this works.

Note

This layer has a limited scope. It is intended to be used in conjunction with MOI.copy_to.

Example: copy model excluding integer constraints

Use the do syntax to provide a single function.

filtered_src = MOI.Utilities.ModelFilter(src) do item
    return item != (MOI.VariableIndex, MOI.Integer)
end
MOI.copy_to(dest, filtered_src)

A struct storing F-in-S constraints as a mapping from the constraint indices to the corresponding tuple of function and set.

source
MathOptInterface.Utilities.StructOfConstraintsType
abstract type StructOfConstraints <: MOI.ModelLike end

A struct whose subfields are other structs storing constraints of different types.

See Utilities.@struct_of_constraints_by_function_types and Utilities.@struct_of_constraints_by_set_types.

source
MathOptInterface.Utilities.@struct_of_constraints_by_function_typesMacro
Utilities.@struct_of_constraints_by_function_types(name, func_types...)

Given a vector of n function types (F1, F2,..., Fn) in func_types, defines a subtype of StructOfConstraints named name with type parameters {T, C1, C2, ..., Cn}. It contains n fields, where the ith field has type Ci and stores the constraints of function type Fi.

The expression Fi can also be a union in which case any constraint for which the function type is in the union is stored in the field with type Ci.

source
MathOptInterface.Utilities.@struct_of_constraints_by_set_typesMacro
Utilities.@struct_of_constraints_by_set_types(name, func_types...)

Given a vector of n set types (S1, S2,..., Sn) in func_types, defines a subtype of StructOfConstraints named name with type parameters {T, C1, C2, ..., Cn}. It contains n fields, where the ith field has type Ci and stores the constraints of set type Si. The expression Si can also be a union, in which case any constraint for which the set type is in the union is stored in the field with type Ci. This can be useful if Ci is a MatrixOfConstraints, in order to concatenate the coefficients of constraints of several different set types in the same matrix.

source
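As a minimal sketch of the calling convention described above (the struct name ZerosOrNot is invented for illustration), the macro takes the name of the new type followed by the set types, any of which may be a Union:

using MathOptInterface
const MOI = MathOptInterface

# Defines `ZerosOrNot{T,C1,C2} <: MOI.Utilities.StructOfConstraints` with one
# field for MOI.Zeros constraints and one field for the union of the two cones.
MOI.Utilities.@struct_of_constraints_by_set_types(
    ZerosOrNot,
    MOI.Zeros,
    Union{MOI.Nonnegatives,MOI.Nonpositives},
)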
MathOptInterface.Utilities.struct_of_constraint_codeFunction
struct_of_constraint_code(struct_name, types, field_types = nothing)

Given a vector of n Union{SymbolFun,_UnionSymbolFS{SymbolFun}} or Union{SymbolSet,_UnionSymbolFS{SymbolSet}} in types, defines a subtype of StructOfConstraints with the given name and with type parameters {T, F1, F2, ..., Fn} if field_types is nothing, and {T} otherwise. It contains n fields, where the ith field has type Ci if field_types is nothing, and type field_types[i] otherwise. If types is a vector of Union{SymbolFun,_UnionSymbolFS{SymbolFun}} (resp. Union{SymbolSet,_UnionSymbolFS{SymbolSet}}), then the constraints of that function (resp. set) type are stored in the corresponding field.

This function is used by the macros @model, @struct_of_constraints_by_function_types and @struct_of_constraints_by_set_types.

source

Caching optimizer

MathOptInterface.Utilities.CachingOptimizerType
CachingOptimizer

CachingOptimizer is an intermediate layer that stores a cache of the model and links it with an optimizer. It supports incremental model construction and modification even when the optimizer doesn't.

Constructors

    CachingOptimizer(cache::MOI.ModelLike, optimizer::AbstractOptimizer)

Creates a CachingOptimizer in AUTOMATIC mode, with the optimizer optimizer.

The type of the optimizer returned is CachingOptimizer{typeof(optimizer), typeof(cache)} so it does not support the function reset_optimizer(::CachingOptimizer, new_optimizer) if the type of new_optimizer is different from the type of optimizer.

    CachingOptimizer(cache::MOI.ModelLike, mode::CachingOptimizerMode)

Creates a CachingOptimizer in the NO_OPTIMIZER state and mode mode.

The type of the optimizer returned is CachingOptimizer{MOI.AbstractOptimizer,typeof(cache)} so it does support the function reset_optimizer(::CachingOptimizer, new_optimizer) if the type of new_optimizer is different from the type of optimizer.

About the type

States

A CachingOptimizer may be in one of three possible states (CachingOptimizerState):

  • NO_OPTIMIZER: The CachingOptimizer does not have any optimizer.
  • EMPTY_OPTIMIZER: The CachingOptimizer has an empty optimizer. The optimizer is not synchronized with the cached model.
  • ATTACHED_OPTIMIZER: The CachingOptimizer has an optimizer, and it is synchronized with the cached model.

Modes

A CachingOptimizer has two modes of operation (CachingOptimizerMode):

  • MANUAL: The only methods that change the state of the CachingOptimizer are Utilities.reset_optimizer, Utilities.drop_optimizer, and Utilities.attach_optimizer. Attempting to perform an operation in the incorrect state results in an error.
  • AUTOMATIC: The CachingOptimizer changes its state when necessary. For example, optimize! will automatically call attach_optimizer (an optimizer must have been previously set). Attempting to add a constraint or perform a modification not supported by the optimizer results in a drop to the EMPTY_OPTIMIZER state.
source
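A minimal sketch of the second constructor, building a cache with no optimizer attached and querying its state and mode:

using MathOptInterface
const MOI = MathOptInterface

cache = MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}())
model = MOI.Utilities.CachingOptimizer(cache, MOI.Utilities.AUTOMATIC)
MOI.Utilities.state(model)  # MOI.Utilities.NO_OPTIMIZER
MOI.Utilities.mode(model)   # MOI.Utilities.AUTOMATIC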
MathOptInterface.Utilities.attach_optimizerFunction
attach_optimizer(model::CachingOptimizer)

Attaches the optimizer to model, copying all model data into it. Can be called only from the EMPTY_OPTIMIZER state. If the copy succeeds, the CachingOptimizer will be in state ATTACHED_OPTIMIZER after the call, otherwise an error is thrown; see MOI.copy_to for more details on which errors can be thrown.

source
MOIU.attach_optimizer(model::GenericModel)

Call MOIU.attach_optimizer on the backend of model.

Cannot be called in direct mode.

source
MathOptInterface.Utilities.reset_optimizerFunction
reset_optimizer(m::CachingOptimizer, optimizer::MOI.AbstractOptimizer)

Sets or resets m to have the given empty optimizer optimizer.

Can be called from any state. An assertion error will be thrown if optimizer is not empty.

The CachingOptimizer m will be in state EMPTY_OPTIMIZER after the call.

source
reset_optimizer(m::CachingOptimizer)

Detaches and empties the current optimizer. Can be called from ATTACHED_OPTIMIZER or EMPTY_OPTIMIZER state. The CachingOptimizer will be in state EMPTY_OPTIMIZER after the call.

source
MOIU.reset_optimizer(model::GenericModel, optimizer::MOI.AbstractOptimizer)

Call MOIU.reset_optimizer on the backend of model.

Cannot be called in direct mode.

source
MOIU.reset_optimizer(model::GenericModel)

Call MOIU.reset_optimizer on the backend of model.

Cannot be called in direct mode.

source
MathOptInterface.Utilities.drop_optimizerFunction
drop_optimizer(m::CachingOptimizer)

Drops the optimizer, if one is present. Can be called from any state. The CachingOptimizer will be in state NO_OPTIMIZER after the call.

source
MOIU.drop_optimizer(model::GenericModel)

Call MOIU.drop_optimizer on the backend of model.

Cannot be called in direct mode.

source
MathOptInterface.Utilities.stateFunction
state(m::CachingOptimizer)::CachingOptimizerState

Returns the state of the CachingOptimizer m. See Utilities.CachingOptimizer.

source
MathOptInterface.Utilities.modeFunction
mode(m::CachingOptimizer)::CachingOptimizerMode

Returns the operating mode of the CachingOptimizer m. See Utilities.CachingOptimizer.

source
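To illustrate the state transitions described above, here is a small sketch in MANUAL mode that uses the mock optimizer documented below as a stand-in for a real solver:

using MathOptInterface
const MOI = MathOptInterface

cache = MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}())
model = MOI.Utilities.CachingOptimizer(cache, MOI.Utilities.MANUAL)
MOI.Utilities.state(model)  # NO_OPTIMIZER

MOI.Utilities.reset_optimizer(model, MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}()))
MOI.Utilities.state(model)  # EMPTY_OPTIMIZER

MOI.Utilities.attach_optimizer(model)  # copies the cached model into the optimizer
MOI.Utilities.state(model)  # ATTACHED_OPTIMIZER

MOI.Utilities.drop_optimizer(model)
MOI.Utilities.state(model)  # NO_OPTIMIZER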

Mock optimizer

MathOptInterface.Utilities.MockOptimizerType
MockOptimizer

MockOptimizer is a fake optimizer especially useful for testing. Its main feature is that it can store the values that should be returned for each attribute.

source
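A short sketch of how the mock is primed with the attribute values it should report, here the termination status:

using MathOptInterface
const MOI = MathOptInterface

mock = MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}())
# The mock does not solve anything; it simply returns the stored values.
MOI.set(mock, MOI.TerminationStatus(), MOI.OPTIMAL)
MOI.get(mock, MOI.TerminationStatus())  # MOI.OPTIMAL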

Printing

MathOptInterface.Utilities.latex_formulationFunction
latex_formulation(model::MOI.ModelLike; kwargs...)

Wrap model in a type so that it can be pretty-printed as text/latex in a notebook like IJulia, or in Documenter.

To render the model, end the cell with latex_formulation(model), or call display(latex_formulation(model)) to force the display of the model from inside a function.

Possible keyword arguments are:

  • simplify_coefficients : Simplify coefficients if possible by omitting them or removing trailing zeros.
  • default_name : The name given to variables with an empty name.
  • print_types : Print the MOI type of each function and set for clarity.
source
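A minimal sketch that builds a one-variable model and displays it with simplified coefficients:

using MathOptInterface
const MOI = MathOptInterface

model = MOI.Utilities.Model{Float64}()
x = MOI.add_variable(model)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(model, MOI.ObjectiveFunction{MOI.VariableIndex}(), x)

# In a notebook, ending the cell with this expression renders the formulation;
# from a script or function, wrap it in `display`.
display(MOI.Utilities.latex_formulation(model; simplify_coefficients = true))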

Copy utilities

MathOptInterface.Utilities.default_copy_toFunction
default_copy_to(dest::MOI.ModelLike, src::MOI.ModelLike)

A default implementation of MOI.copy_to(dest, src) for models that implement the incremental interface, that is, MOI.supports_incremental_interface returns true.

source
MathOptInterface.Utilities.IndexMapType
IndexMap()

The dictionary-like object returned by MOI.copy_to.

source
MathOptInterface.Utilities.identity_index_mapFunction
identity_index_map(model::MOI.ModelLike)

Return an IndexMap that maps all variable and constraint indices of model to themselves.

source
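A small sketch of the copy workflow: copy_to returns an IndexMap translating indices in the source model to indices in the destination:

using MathOptInterface
const MOI = MathOptInterface

src = MOI.Utilities.Model{Float64}()
x = MOI.add_variable(src)
dest = MOI.Utilities.Model{Float64}()

index_map = MOI.copy_to(dest, src)
index_map[x]  # the MOI.VariableIndex corresponding to `x` in `dest`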
MathOptInterface.Utilities.ModelFilterType
ModelFilter(filter::Function, model::MOI.ModelLike)

A layer to filter out various components of model.

The filter function takes a single argument, which is each element from the list returned by the attributes below. It returns true if the element should be visible in the filtered model and false otherwise.

The components that are filtered are:

  • Entire constraint types via:
    • MOI.ListOfConstraintTypesPresent
  • Individual constraints via:
    • MOI.ListOfConstraintIndices{F,S}
  • Specific attributes via:
    • MOI.ListOfModelAttributesSet
    • MOI.ListOfConstraintAttributesSet
    • MOI.ListOfVariableAttributesSet
Warning

The list of attributes filtered may change in a future release. You should write functions that are generic and not limited to the five types listed above. Thus, you should probably define a fallback filter(::Any) = true.

See below for examples of how this works.

Note

This layer has a limited scope. It is intended to be used in conjunction with MOI.copy_to.

Example: copy model excluding integer constraints

Use the do syntax to provide a single function.

filtered_src = MOI.Utilities.ModelFilter(src) do item
     return item != (MOI.VariableIndex, MOI.Integer)
 end
 MOI.copy_to(dest, filtered_src)

Example: copy model excluding names

Use type dispatch to simplify the implementation:

my_filter(::Any) = true  # Note the generic fallback
@@ -343,4 +343,4 @@
 For performance, it is recommended that the inner loop lies in a separate
 function to guarantee type-stability.
 
-If you want an iterator of all current outer keys, use [`outer_keys`](@ref).
source
+If you want an iterator of all current outer keys, use [`outer_keys`](@ref).source diff --git a/dev/moi/tutorials/bridging_constraint/index.html b/dev/moi/tutorials/bridging_constraint/index.html index 07b1e2d1b0f..98cfc9ff0d1 100644 --- a/dev/moi/tutorials/bridging_constraint/index.html +++ b/dev/moi/tutorials/bridging_constraint/index.html @@ -103,4 +103,4 @@ end

Bridge deletion

When a bridge is deleted, the constraints it added must be deleted too.

function delete(model::ModelLike, bridge::SignBridge)
     delete(model, bridge.constraint)
     return
-end
+end diff --git a/dev/moi/tutorials/example/index.html b/dev/moi/tutorials/example/index.html index 5abacd5bbac..3473bb6469c 100644 --- a/dev/moi/tutorials/example/index.html +++ b/dev/moi/tutorials/example/index.html @@ -46,4 +46,4 @@ 3-element Vector{Float64}: 1.0 1.0 - 1.0 + 1.0 diff --git a/dev/moi/tutorials/implementing/index.html b/dev/moi/tutorials/implementing/index.html index 94699162fb2..961f6add1ba 100644 --- a/dev/moi/tutorials/implementing/index.html +++ b/dev/moi/tutorials/implementing/index.html @@ -115,4 +115,4 @@ n = # Code to get NumberOfObjectives return n end

Then, the user can write:

model = Gurobi.Optimizer()
-MOI.set(model, Gurobi.NumberofObjectives(), 3)
+MOI.set(model, Gurobi.NumberofObjectives(), 3) diff --git a/dev/moi/tutorials/latency/index.html b/dev/moi/tutorials/latency/index.html index 8f5096f4281..da18ab46368 100644 --- a/dev/moi/tutorials/latency/index.html +++ b/dev/moi/tutorials/latency/index.html @@ -130,4 +130,4 @@ end

You can create a flame-graph via

using SnoopCompile
 tinf = @snoopi_deep example_diet(GLPK.Optimizer, true)
 using ProfileView
-ProfileView.view(flamegraph(tinf))

Here's how things looked in mid-August 2021: flamegraph

There are a few opportunities for improvement (non-red flames, particularly on the right). But the main problem is a large red (non-precompilable due to method ownership) flame.

+ProfileView.view(flamegraph(tinf))

Here's how things looked in mid-August 2021: flamegraph

There are a few opportunities for improvement (non-red flames, particularly on the right). But the main problem is a large red (non-precompilable due to method ownership) flame.

diff --git a/dev/moi/tutorials/manipulating_expressions/index.html b/dev/moi/tutorials/manipulating_expressions/index.html index eb19ec33560..879c2fc8736 100644 --- a/dev/moi/tutorials/manipulating_expressions/index.html +++ b/dev/moi/tutorials/manipulating_expressions/index.html @@ -23,4 +23,4 @@ 2-element Vector{MathOptInterface.ScalarAffineFunction{Int64}}: (2) + (1) MOI.VariableIndex(1) (4) + (2) MOI.VariableIndex(1)
Note

Utilities.eachscalar returns an iterator on the dimensions, which serves the same purpose as Utilities.scalarize.

output_dimension returns the number of dimensions of the output of a function:

julia> MOI.output_dimension(g)
-2
+2 diff --git a/dev/moi/tutorials/mathprogbase/index.html b/dev/moi/tutorials/mathprogbase/index.html index 488be27cf3c..ed787ca9868 100644 --- a/dev/moi/tutorials/mathprogbase/index.html +++ b/dev/moi/tutorials/mathprogbase/index.html @@ -55,4 +55,4 @@ objval = objective_value(model), sol = value.(x) ) -end +end diff --git a/dev/objects.inv b/dev/objects.inv index 52643d99f3c..2b4490976bc 100644 Binary files a/dev/objects.inv and b/dev/objects.inv differ diff --git a/dev/packages/Alpine/index.html b/dev/packages/Alpine/index.html index 98407a291cb..a89618549cf 100644 --- a/dev/packages/Alpine/index.html +++ b/dev/packages/Alpine/index.html @@ -46,4 +46,4 @@ author={Kim, Jongeun and Richard, Jean-Philippe P. and Tawarmalani, Mohit}, eprinttype={Optimization Online}, date={2022} -} +} diff --git a/dev/packages/AmplNLWriter/index.html b/dev/packages/AmplNLWriter/index.html index 95d8d35f8fc..2b5ea7bbb0d 100644 --- a/dev/packages/AmplNLWriter/index.html +++ b/dev/packages/AmplNLWriter/index.html @@ -12,4 +12,4 @@ import Bonmin_jll model = Model(() -> AmplNLWriter.Optimizer(Bonmin_jll.amplexe)) set_attribute(model, "bonmin.nlp_log_level", 0)

opt files

Some options need to be specified via an .opt file.

This file must be located in the current working directory whenever the model is solved.

The .opt file must be named after the name of the solver, for example, bonmin.opt, and each line must contain an option name and the desired value, separated by a space.

For example, to set the absolute and relative tolerances in Couenne to 1 and 0.05 respectively, the couenne.opt file should contain:

allowable_gap 1
-allowable_fraction_gap 0.05
+allowable_fraction_gap 0.05 diff --git a/dev/packages/BARON/index.html b/dev/packages/BARON/index.html index d1ff963017a..79602fff1c6 100644 --- a/dev/packages/BARON/index.html +++ b/dev/packages/BARON/index.html @@ -6,4 +6,4 @@

BARON.jl

Build Status codecov

BARON.jl is a wrapper for BARON by The Optimization Firm.

Affiliation

This wrapper is maintained by the JuMP community and is not officially supported by The Optimization Firm.

Getting help

If you need help, please ask a question on the JuMP community forum.

If you have a reproducible example of a bug, please open a GitHub issue.

License

BARON.jl is licensed under the MIT License.

The underlying solver is a closed-source commercial product for which you must obtain a license from The Optimization Firm, although a small trial version is available for free.

Installation

First, download a copy of the BARON solver and unpack the executable in a location of your choosing.

Once installed, set the BARON_EXEC environment variable to point to the BARON executable (the full path, including the file name, which differs across platforms), and run Pkg.add("BARON"). For example:

ENV["BARON_EXEC"] = "/path/to/baron.exe"
 using Pkg
 Pkg.add("BARON")

The baronlice.txt license file should be placed in the same directory as the BARON executable, or in your current working directory.

Use with JuMP

using JuMP, BARON
-model = Model(BARON.Optimizer)

MathOptInterface API

The BARON optimizer supports the following constraints and attributes.

List of supported objective functions:

List of supported variable types:

List of supported constraint types:

List of supported model attributes:

+model = Model(BARON.Optimizer)

MathOptInterface API

The BARON optimizer supports the following constraints and attributes.

List of supported objective functions:

List of supported variable types:

List of supported constraint types:

List of supported model attributes:

diff --git a/dev/packages/BilevelJuMP/index.html b/dev/packages/BilevelJuMP/index.html index a34b38fa74f..a709666268d 100644 --- a/dev/packages/BilevelJuMP/index.html +++ b/dev/packages/BilevelJuMP/index.html @@ -34,4 +34,4 @@ objective_value(model) # = 3 * (3.5 * 8/15) + 8/15 # = 6.13... value(x) # = 3.5 * 8/15 # = 1.86... -value(y) # = 8/15 # = 0.53... +value(y) # = 8/15 # = 0.53... diff --git a/dev/packages/CDCS/index.html b/dev/packages/CDCS/index.html index ad1598fd790..18c20e6a021 100644 --- a/dev/packages/CDCS/index.html +++ b/dev/packages/CDCS/index.html @@ -27,4 +27,4 @@ mat"cdcsInstall" end -julia> mat"savepath" +julia> mat"savepath" diff --git a/dev/packages/CDDLib/index.html b/dev/packages/CDDLib/index.html index f6285fa7200..6f9c2185016 100644 --- a/dev/packages/CDDLib/index.html +++ b/dev/packages/CDDLib/index.html @@ -6,4 +6,4 @@

CDDLib

CDDLib.jl is a wrapper for cddlib.

CDDLib.jl can be used with C API of cddlib, the higher level interface of Polyhedra.jl, or as a linear programming solver with JuMP or MathOptInterface.

Problem description

As written in the README of cddlib:

The C-library cddlib is a C implementation of the Double Description Method of Motzkin et al. for generating all vertices (that is, extreme points) and extreme rays of a general convex polyhedron in R^d given by a system of linear inequalities:

P = { x=(x1, ..., xd)^T :  b - A  x  >= 0 }

where A is a given m x d real matrix, b is a given m-vector and 0 is the m-vector of all zeros.

The program can be used for the reverse operation (that is, convex hull computation). This means that one can move back and forth between an inequality representation and a generator (that is, vertex and ray) representation of a polyhedron with cdd. Also, cdd can solve a linear programming problem, that is, a problem of maximizing and minimizing a linear function over P.

License

CDDLib.jl is licensed under the GPL v2 license.

The underlying solver, cddlib/cddlib is also licensed under the GPL v2 license.

Installation

Install CDDLib.jl using the Julia package manager:

import Pkg
 Pkg.add("CDDLib")

Building the package will download binaries of cddlib that are provided by cddlib_jll.jl.

Use with JuMP

Use CDDLib.Optimizer{Float64} to use CDDLib.jl with JuMP:

using JuMP, CDDLib
 model = Model(CDDLib.Optimizer{Float64})

When using CDDLib.jl with MathOptInterface, you can pass a different number type:

using MathOptInterface, CDDLib
-model = CDDLib.Optimizer{Rational{BigInt}}()

Debugging

CDDLib.jl uses two global Boolean variables to enable debugging outputs: debug and log.

You can query the value of debug and log with get_debug and get_log, and set their values with set_debug and set_log.

+model = CDDLib.Optimizer{Rational{BigInt}}()

Debugging

CDDLib.jl uses two global Boolean variables to enable debugging outputs: debug and log.

You can query the value of debug and log with get_debug and get_log, and set their values with set_debug and set_log.
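A hedged sketch, assuming the setters take a Bool (check the CDDLib.jl docstrings for the exact signatures):

using CDDLib

CDDLib.set_debug(true)   # assumed signature: enable debugging output
CDDLib.get_debug()       # query the current value
CDDLib.set_log(false)    # assumed signature: disable log output
CDDLib.get_log()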

diff --git a/dev/packages/COPT/index.html b/dev/packages/COPT/index.html index a1d7ed7ea74..7d423d8c190 100644 --- a/dev/packages/COPT/index.html +++ b/dev/packages/COPT/index.html @@ -39,4 +39,4 @@ @show value.(X) @show value.(z) @show shadow_price(c1) -@show shadow_price(c2) +@show shadow_price(c2) diff --git a/dev/packages/COSMO/index.html b/dev/packages/COSMO/index.html index f09336c6d9d..bd09a35d911 100644 --- a/dev/packages/COSMO/index.html +++ b/dev/packages/COSMO/index.html @@ -34,4 +34,4 @@ publisher = {Springer}, doi = {10.1007/s10957-021-01896-x}, url = {https://doi.org/10.1007/s10957-021-01896-x} -}

The article is available under Open Access here.

Contributing

Python - Interface

COSMO can also be called from Python. Take a look at: cosmo-python

Licence 🔍

This project is licensed under the Apache License - see the LICENSE.md file for details.

+}

The article is available under Open Access here.

Contributing

Python - Interface

COSMO can also be called from Python. Take a look at: cosmo-python

Licence 🔍

This project is licensed under the Apache License - see the LICENSE.md file for details.

diff --git a/dev/packages/CPLEX/index.html b/dev/packages/CPLEX/index.html index 47f114b6847..a234576e310 100644 --- a/dev/packages/CPLEX/index.html +++ b/dev/packages/CPLEX/index.html @@ -163,4 +163,4 @@ x_optimal = value.(x) y_optimal = value.(y) println("x: $(x_optimal), y: $(y_optimal)") -end +end diff --git a/dev/packages/CSDP/index.html b/dev/packages/CSDP/index.html index f5eaed98d4c..1a29f1f3cd3 100644 --- a/dev/packages/CSDP/index.html +++ b/dev/packages/CSDP/index.html @@ -10,4 +10,4 @@ A(X) = a X ⪰ 0

where A(X) = [⟨A_1, X⟩, ..., ⟨A_m, X⟩]. The corresponding dual is:

min ⟨a, y⟩
      A'(y) - C = Z
-             Z ⪰ 0

where A'(y) = y_1A_1 + ... + y_mA_m

Termination criteria

CSDP will terminate successfully (or partially) in the following cases:

In addition, if the printlevel option is at least 1, the following will be printed:

In theory, for feasible primal and dual solutions, ⟨a, y⟩ - ⟨C, X⟩ = ⟨Z, X⟩, so the objective duality gap and the XY duality gap should be equivalent. However, in practice, there are sometimes solutions which satisfy the primal and dual feasibility tolerances but whose objective duality gap is not close to the XY duality gap. In some cases, the objective duality gap may even become negative (hence the tweakgap option). This is the reason usexygap is 1 by default.

CSDP considers that X ⪰ 0 (resp. Z ⪰ 0) is satisfied when the Cholesky factorizations can be computed. In practice, this is somewhat more conservative than simply requiring all eigenvalues to be nonnegative.

Status

The table below shows how the different CSDP statuses are converted to the MathOptInterface statuses.

CSDP code | State           | Description                                                    | MOI status
0         | Success         | SDP solved                                                     | MOI.OPTIMAL
1         | Success         | The problem is primal infeasible, and we have a certificate    | MOI.INFEASIBLE
2         | Success         | The problem is dual infeasible, and we have a certificate      | MOI.DUAL_INFEASIBLE
3         | Partial Success | A solution has been found, but full accuracy was not achieved  | MOI.ALMOST_OPTIMAL
4         | Failure         | Maximum iterations reached                                     | MOI.ITERATION_LIMIT
5         | Failure         | Stuck at edge of primal feasibility                            | MOI.SLOW_PROGRESS
6         | Failure         | Stuck at edge of dual infeasibility                            | MOI.SLOW_PROGRESS
7         | Failure         | Lack of progress                                               | MOI.SLOW_PROGRESS
8         | Failure         | X, Z, or O was singular                                        | MOI.NUMERICAL_ERROR
9         | Failure         | Detected NaN or Inf values                                     | MOI.NUMERICAL_ERROR
+ Z ⪰ 0

where A'(y) = y_1A_1 + ... + y_mA_m

Termination criteria

CSDP will terminate successfully (or partially) in the following cases:

In addition, if the printlevel option is at least 1, the following will be printed:

In theory, for feasible primal and dual solutions, ⟨a, y⟩ - ⟨C, X⟩ = ⟨Z, X⟩, so the objective duality gap and the XY duality gap should be equivalent. However, in practice, there are sometimes solutions which satisfy the primal and dual feasibility tolerances but whose objective duality gap is not close to the XY duality gap. In some cases, the objective duality gap may even become negative (hence the tweakgap option). This is the reason usexygap is 1 by default.

CSDP considers that X ⪰ 0 (resp. Z ⪰ 0) is satisfied when the Cholesky factorizations can be computed. In practice, this is somewhat more conservative than simply requiring all eigenvalues to be nonnegative.

Status

The table below shows how the different CSDP statuses are converted to the MathOptInterface statuses.

CSDP code | State           | Description                                                    | MOI status
0         | Success         | SDP solved                                                     | MOI.OPTIMAL
1         | Success         | The problem is primal infeasible, and we have a certificate    | MOI.INFEASIBLE
2         | Success         | The problem is dual infeasible, and we have a certificate      | MOI.DUAL_INFEASIBLE
3         | Partial Success | A solution has been found, but full accuracy was not achieved  | MOI.ALMOST_OPTIMAL
4         | Failure         | Maximum iterations reached                                     | MOI.ITERATION_LIMIT
5         | Failure         | Stuck at edge of primal feasibility                            | MOI.SLOW_PROGRESS
6         | Failure         | Stuck at edge of dual infeasibility                            | MOI.SLOW_PROGRESS
7         | Failure         | Lack of progress                                               | MOI.SLOW_PROGRESS
8         | Failure         | X, Z, or O was singular                                        | MOI.NUMERICAL_ERROR
9         | Failure         | Detected NaN or Inf values                                     | MOI.NUMERICAL_ERROR
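From JuMP, the mapped status is available after the solve; a minimal sketch (the SDP construction is elided):

using JuMP, CSDP

model = Model(CSDP.Optimizer)
# ... build the SDP ...
optimize!(model)
termination_status(model)  # for example, MOI.OPTIMAL when CSDP returns code 0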
diff --git a/dev/packages/Cbc/index.html b/dev/packages/Cbc/index.html index 47eb743ab3e..61909f8b0a0 100644 --- a/dev/packages/Cbc/index.html +++ b/dev/packages/Cbc/index.html @@ -9,4 +9,4 @@ set_attribute(model, "logLevel", 1)

MathOptInterface API

The COIN Branch-and-Cut (Cbc) optimizer supports the following constraints and attributes.

List of supported objective functions:

List of supported variable types:

List of supported constraint types:

List of supported model attributes:

List of supported optimizer attributes:

List of supported variable attributes:

List of supported constraint attributes:

Options

Options are, unfortunately, not well documented.

The following options are likely to be the most useful:

Parameter    | Example | Explanation
seconds      | 60.0    | Solution timeout limit
logLevel     | 2       | Set to 0 to disable solution output
maxSolutions | 1       | Terminate after this many feasible solutions have been found
maxNodes     | 1       | Terminate after this many branch-and-bound nodes have been evaluated
allowableGap | 0.05    | Terminate after optimality gap is less than this value (on an absolute scale)
ratioGap     | 0.05    | Terminate after optimality gap is smaller than this relative fraction
threads      | 1       | Set the number of threads to use for parallel branch & bound

The complete list of parameters can be found by running the cbc executable and typing ? at the prompt.
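For example, the options in the table above are set with set_attribute:

using JuMP, Cbc

model = Model(Cbc.Optimizer)
set_attribute(model, "seconds", 60.0)
set_attribute(model, "maxSolutions", 1)
set_attribute(model, "logLevel", 2)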

Start the cbc executable from Julia as follows:

using Cbc_jll
 Cbc_jll.cbc() do exe
     run(`$(exe)`)
-end
+end diff --git a/dev/packages/Clarabel/index.html b/dev/packages/Clarabel/index.html index 44e8ed0110c..4f4a87b1583 100644 --- a/dev/packages/Clarabel/index.html +++ b/dev/packages/Clarabel/index.html @@ -33,4 +33,4 @@

eprint={2405.12762}, archivePrefix={arXiv}, primaryClass={math.OC} -}

License 🔍

This project is licensed under the Apache License 2.0 - see the LICENSE.md file for details.

+}

License 🔍

This project is licensed under the Apache License 2.0 - see the LICENSE.md file for details.

diff --git a/dev/packages/Clp/index.html b/dev/packages/Clp/index.html index 3f5a9354b68..878c14c48f1 100644 --- a/dev/packages/Clp/index.html +++ b/dev/packages/Clp/index.html @@ -7,4 +7,4 @@ Pkg.add("Clp")

In addition to installing the Clp.jl package, this will also download and install the Clp binaries. You do not need to install Clp separately.

To use a custom binary, read the Custom solver binaries section of the JuMP documentation.

Use with JuMP

To use Clp with JuMP, use Clp.Optimizer:

using JuMP, Clp
 model = Model(Clp.Optimizer)
 set_attribute(model, "LogLevel", 1)
-set_attribute(model, "Algorithm", 4)

MathOptInterface API

The Clp optimizer supports the following constraints and attributes.

List of supported objective functions:

List of supported variable types:

List of supported constraint types:

List of supported model attributes:

Options

Options are, unfortunately, not well documented.

The following options are likely to be the most useful:

Parameter          | Example    | Explanation
PrimalTolerance    | 1e-7       | Primal feasibility tolerance
DualTolerance      | 1e-7       | Dual feasibility tolerance
DualObjectiveLimit | 1e308      | When using dual simplex (where the objective is monotonically changing), terminate when the objective exceeds this limit
MaximumIterations  | 2147483647 | Terminate after performing this number of simplex iterations
MaximumSeconds     | -1.0       | Terminate after this many seconds have passed. A negative value means no time limit
LogLevel           | 1          | Set to 1, 2, 3, or 4 for increasing output. Set to 0 to disable output
PresolveType       | 0          | Set to 1 to disable presolve
SolveType          | 5          | Solution method: dual simplex (0), primal simplex (1), sprint (2), barrier with crossover (3), barrier without crossover (4), automatic (5)
InfeasibleReturn   | 0          | Set to 1 to return as soon as the problem is found to be infeasible (by default, an infeasibility proof is computed as well)
Scaling            | 3          | 0 - off, 1 equilibrium, 2 geometric, 3 auto, 4 dynamic (later)
Perturbation       | 100        | switch on perturbation (50), automatic (100), don't try perturbing (102)
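For example, to set a time limit and disable presolve using names from the table above:

using JuMP, Clp

model = Model(Clp.Optimizer)
set_attribute(model, "MaximumSeconds", 60.0)
set_attribute(model, "PresolveType", 1)
set_attribute(model, "SolveType", 1)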

C API

The C API can be accessed via Clp.Clp_XXX functions, where the names and arguments are identical to the C API.

+set_attribute(model, "Algorithm", 4)

MathOptInterface API

The Clp optimizer supports the following constraints and attributes.

List of supported objective functions:

List of supported variable types:

List of supported constraint types:

List of supported model attributes:

Options

Options are, unfortunately, not well documented.

The following options are likely to be the most useful:

Parameter          | Example    | Explanation
PrimalTolerance    | 1e-7       | Primal feasibility tolerance
DualTolerance      | 1e-7       | Dual feasibility tolerance
DualObjectiveLimit | 1e308      | When using dual simplex (where the objective is monotonically changing), terminate when the objective exceeds this limit
MaximumIterations  | 2147483647 | Terminate after performing this number of simplex iterations
MaximumSeconds     | -1.0       | Terminate after this many seconds have passed. A negative value means no time limit
LogLevel           | 1          | Set to 1, 2, 3, or 4 for increasing output. Set to 0 to disable output
PresolveType       | 0          | Set to 1 to disable presolve
SolveType          | 5          | Solution method: dual simplex (0), primal simplex (1), sprint (2), barrier with crossover (3), barrier without crossover (4), automatic (5)
InfeasibleReturn   | 0          | Set to 1 to return as soon as the problem is found to be infeasible (by default, an infeasibility proof is computed as well)
Scaling            | 3          | 0 - off, 1 equilibrium, 2 geometric, 3 auto, 4 dynamic (later)
Perturbation       | 100        | switch on perturbation (50), automatic (100), don't try perturbing (102)

C API

The C API can be accessed via Clp.Clp_XXX functions, where the names and arguments are identical to the C API.

diff --git a/dev/packages/DAQP/index.html b/dev/packages/DAQP/index.html index 7182088241b..c1be5f9ea72 100644 --- a/dev/packages/DAQP/index.html +++ b/dev/packages/DAQP/index.html @@ -3,6 +3,6 @@ function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-0RZ8X3D3D0', {'page_path': location.pathname + location.search + location.hash}); -

DAQP.jl

DAQP.jl is a Julia wrapper for the Quadratic Programming solver DAQP.

License

DAQP.jl is licensed under the MIT license.

The underlying solver, darnstrom/daqp is licensed under the MIT license.

Installation

Install DAQP.jl using the Julia package manager:

import Pkg
+

DAQP.jl

DAQP.jl is a Julia wrapper for the Quadratic Programming solver DAQP.

License

DAQP.jl is licensed under the MIT license.

The underlying solver, darnstrom/daqp is licensed under the MIT license.

Installation

Install DAQP.jl using the Julia package manager:

import Pkg
 Pkg.add("DAQP")

Use with JuMP

To use DAQP with JuMP, do:

using JuMP, DAQP
-model = Model(DAQP.Optimizer)

Documentation

General information about the solver is available at https://darnstrom.github.io/daqp/, and specifics for the Julia interface are available at https://darnstrom.github.io/daqp/start/julia.

+model = Model(DAQP.Optimizer)

Documentation

General information about the solver is available at https://darnstrom.github.io/daqp/, and specifics for the Julia interface are available at https://darnstrom.github.io/daqp/start/julia.

diff --git a/dev/packages/DSDP/index.html b/dev/packages/DSDP/index.html index ea10791bb53..109a588ff81 100644 --- a/dev/packages/DSDP/index.html +++ b/dev/packages/DSDP/index.html @@ -8,4 +8,4 @@ model = Model(DSDP.Optimizer)

MathOptInterface API

The DSDP optimizer supports the following constraints and attributes.

List of supported objective functions:

List of supported variable types:

List of supported constraint types:

List of supported model attributes:

Compile your own binaries

In order to compile your own libdsdp.so to be used by DSDP.jl, use the following:

OB_DIR=$(julia --project=. -e 'import OpenBLAS32_jll; println(OpenBLAS32_jll.artifact_dir)')
 OB="-L${OB_DIR}/lib -lopenblas"
 make DSDPCFLAGS="-g -Wall -fPIC -DPIC" LAPACKBLAS="$OB" dsdplibrary
-make DSDPCFLAGS="-g -Wall -fPIC -DPIC" LAPACKBLAS="$OB" SH_LD="${CC} ${CFLAGS} -Wall -fPIC -DPIC -shared $OB" oshared
+make DSDPCFLAGS="-g -Wall -fPIC -DPIC" LAPACKBLAS="$OB" SH_LD="${CC} ${CFLAGS} -Wall -fPIC -DPIC -shared $OB" oshared diff --git a/dev/packages/DiffOpt/index.html b/dev/packages/DiffOpt/index.html index 6defaa6cb6a..34e8407a410 100644 --- a/dev/packages/DiffOpt/index.html +++ b/dev/packages/DiffOpt/index.html @@ -20,4 +20,4 @@ # fetch the gradients grad_exp = MOI.get(model, DiffOpt.ReverseConstraintFunction(), cons) # -3 x - 1 constant(grad_exp) # -1 -coefficient(grad_exp, x) # -3

GSOC2020

DiffOpt began as a NumFOCUS sponsored Google Summer of Code (2020) project

+coefficient(grad_exp, x) # -3

GSOC2020

DiffOpt began as a NumFOCUS sponsored Google Summer of Code (2020) project

diff --git a/dev/packages/DisjunctiveProgramming/index.html b/dev/packages/DisjunctiveProgramming/index.html index 0adf18f9dc2..b0e419d5902 100644 --- a/dev/packages/DisjunctiveProgramming/index.html +++ b/dev/packages/DisjunctiveProgramming/index.html @@ -8,4 +8,4 @@ author={Perez, Hector D and Joshi, Shivank and Grossmann, Ignacio E}, journal={arXiv preprint arXiv:2304.10492}, year={2023} -} +} diff --git a/dev/packages/Dualization/index.html b/dev/packages/Dualization/index.html index d514f2e8b83..e7918c43df0 100644 --- a/dev/packages/Dualization/index.html +++ b/dev/packages/Dualization/index.html @@ -10,4 +10,4 @@ dual_model = dualize(model)

To solve the dual formulation of a JuMP model, create a dual_optimizer:

using JuMP, Dualization, SCS
 model = Model(dual_optimizer(SCS.Optimizer))
 # ... build model ...
-optimize!(model)  # Solves the dual instead of the primal

Documentation

The documentation for Dualization.jl includes a detailed description of the dual reformulation, along with examples and an API reference.

+optimize!(model) # Solves the dual instead of the primal

Documentation

The documentation for Dualization.jl includes a detailed description of the dual reformulation, along with examples and an API reference.

diff --git a/dev/packages/EAGO/index.html b/dev/packages/EAGO/index.html index 8fe8f95aaed..adfdec720f1 100644 --- a/dev/packages/EAGO/index.html +++ b/dev/packages/EAGO/index.html @@ -71,4 +71,4 @@ doi = {10.1080/10556788.2020.1786566}, URL = {https://doi.org/10.1080/10556788.2020.1786566}, eprint = {https://doi.org/10.1080/10556788.2020.1786566} -}

References

  1. Mitsos, A., Chachuat, B., and Barton, P.I. McCormick-based relaxations of algorithms. SIAM Journal on Optimization. 20(2): 573—601 (2009).
  2. Khan, K.A., Watson, H.A.J., and Barton, P.I. Differentiable McCormick relaxations. Journal of Global Optimization. 67(4): 687—729 (2017).
  3. Stuber, M.D., Scott, J.K., and Barton, P.I.: Convex and concave relaxations of implicit functions. Optimization Methods and Software 30(3): 424—460 (2015).
  4. Wechsung, A., Scott, J.K., Watson, H.A.J., and Barton, P.I. Reverse propagation of McCormick relaxations. Journal of Global Optimization 63(1): 1—36 (2015).
  5. Bracken, J., and McCormick, G.P. Selected Applications of Nonlinear Programming. John Wiley and Sons, New York (1968).
+}

References

  1. Mitsos, A., Chachuat, B., and Barton, P.I. McCormick-based relaxations of algorithms. SIAM Journal on Optimization. 20(2): 573—601 (2009).
  2. Khan, K.A., Watson, H.A.J., and Barton, P.I. Differentiable McCormick relaxations. Journal of Global Optimization. 67(4): 687—729 (2017).
  3. Stuber, M.D., Scott, J.K., and Barton, P.I.: Convex and concave relaxations of implicit functions. Optimization Methods and Software 30(3): 424—460 (2015).
  4. Wechsung, A., Scott, J.K., Watson, H.A.J., and Barton, P.I. Reverse propagation of McCormick relaxations. Journal of Global Optimization 63(1): 1—36 (2015).
  5. Bracken, J., and McCormick, G.P. Selected Applications of Nonlinear Programming. John Wiley and Sons, New York (1968).
diff --git a/dev/packages/ECOS/index.html b/dev/packages/ECOS/index.html index 362c7308b87..f77882a302a 100644 --- a/dev/packages/ECOS/index.html +++ b/dev/packages/ECOS/index.html @@ -6,4 +6,4 @@

ECOS.jl

Build Status codecov

ECOS.jl is a wrapper for the ECOS solver.

The wrapper has two components:

Affiliation

This wrapper is maintained by the JuMP community and is not a product of Embotech AG.

License

ECOS.jl is licensed under the MIT License.

The underlying solver, embotech/ecos, is licensed under the GPL v3 license.

Installation

Install ECOS.jl using Pkg.add:

import Pkg
 Pkg.add("ECOS")

In addition to installing the ECOS.jl package, this will also download and install the ECOS binaries. You do not need to install ECOS separately.

To use a custom binary, read the Custom solver binaries section of the JuMP documentation.

Use with JuMP

To use ECOS with JuMP, use ECOS.Optimizer:

using JuMP, ECOS
 model = Model(ECOS.Optimizer)
-set_attribute(model, "maxit", 100)

MathOptInterface API

The ECOS optimizer supports the following constraints and attributes.

List of supported objective functions:

List of supported variable types:

List of supported constraint types:

List of supported model attributes:

Options

The following options are supported:

Parameter     | Explanation
gamma         | scaling the final step length
delta         | regularization parameter
eps           | regularization threshold
feastol       | primal/dual infeasibility tolerance
abstol        | absolute tolerance on duality gap
reltol        | relative tolerance on duality gap
feastol_inacc | primal/dual infeasibility relaxed tolerance
abstol_inacc  | absolute relaxed tolerance on duality gap
reltol_inacc  | relative relaxed tolerance on duality gap
nitref        | number of iterative refinement steps
maxit         | maximum number of iterations
verbose       | verbosity bool for PRINTLEVEL < 3
+set_attribute(model, "maxit", 100)

MathOptInterface API

The ECOS optimizer supports the following constraints and attributes.

List of supported objective functions:

List of supported variable types:

List of supported constraint types:

List of supported model attributes:

Options

The following options are supported:

Parameter     | Explanation
gamma         | scaling the final step length
delta         | regularization parameter
eps           | regularization threshold
feastol       | primal/dual infeasibility tolerance
abstol        | absolute tolerance on duality gap
reltol        | relative tolerance on duality gap
feastol_inacc | primal/dual infeasibility relaxed tolerance
abstol_inacc  | absolute relaxed tolerance on duality gap
reltol_inacc  | relative relaxed tolerance on duality gap
nitref        | number of iterative refinement steps
maxit         | maximum number of iterations
verbose       | verbosity bool for PRINTLEVEL < 3
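For example, to tighten the duality-gap tolerances and raise the iteration limit from the table above:

using JuMP, ECOS

model = Model(ECOS.Optimizer)
set_attribute(model, "abstol", 1e-8)
set_attribute(model, "reltol", 1e-8)
set_attribute(model, "maxit", 200)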
diff --git a/dev/packages/GAMS/index.html b/dev/packages/GAMS/index.html index a349e68b962..76fd9651a95 100644 --- a/dev/packages/GAMS/index.html +++ b/dev/packages/GAMS/index.html @@ -22,4 +22,4 @@ MOI.get(model, GAMS.GeneratedConstraintName(), c[2]) # returns eq2 MOI.get(model, GAMS.OriginalConstraintName("eq1")) # returns c[1] -MOI.get(model, GAMS.OriginalConstraintName("eq10")) # returns nothing

Note that JuMP direct-mode is used.

+MOI.get(model, GAMS.OriginalConstraintName("eq10")) # returns nothing

Note that JuMP direct-mode is used.

diff --git a/dev/packages/GLPK/index.html b/dev/packages/GLPK/index.html index b7723ddc4ee..85c740f75da 100644 --- a/dev/packages/GLPK/index.html +++ b/dev/packages/GLPK/index.html @@ -36,4 +36,4 @@ @test primal_status(model) == MOI.FEASIBLE_POINT @test value(x) == 1 @test value(y) == 2 -@show reasons

C API

The C API can be accessed via GLPK.glp_XXX functions, where the names and arguments are identical to the C API. See the /tests folder for inspiration.

Thread safety

GLPK is not thread-safe and should not be used with multithreading.

+@show reasons

C API

The C API can be accessed via GLPK.glp_XXX functions, where the names and arguments are identical to the C API. See the /tests folder for inspiration.

Thread safety

GLPK is not thread-safe and should not be used with multithreading.

diff --git a/dev/packages/Gurobi/index.html b/dev/packages/Gurobi/index.html index f4057d2fb2f..a0641ee12ba 100644 --- a/dev/packages/Gurobi/index.html +++ b/dev/packages/Gurobi/index.html @@ -169,4 +169,4 @@ println(lower_bound(x[i])) end

Common errors

Using Gurobi v9.0 and you got an error like Q not PSD?

You need to set the NonConvex parameter:

model = Model(Gurobi.Optimizer)
 set_optimizer_attribute(model, "NonConvex", 2)

Gurobi Error 1009: Version number is XX.X, license is for version XX.X

Make sure that your license is correct for your Gurobi version. See the Gurobi documentation for details.

Once you are sure that the license and Gurobi versions match, re-install Gurobi.jl by running:

import Pkg
-Pkg.build("Gurobi")
+Pkg.build("Gurobi") diff --git a/dev/packages/HiGHS/index.html b/dev/packages/HiGHS/index.html index 742c4b965dc..b517df8d948 100644 --- a/dev/packages/HiGHS/index.html +++ b/dev/packages/HiGHS/index.html @@ -3,7 +3,7 @@ function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-0RZ8X3D3D0', {'page_path': location.pathname + location.search + location.hash}); -

HiGHS.jl

Build Status codecov

HiGHS.jl is a wrapper for the HiGHS solver.

It has two components:

Affiliation

This wrapper is maintained by the JuMP community and is not an official project of the HiGHS developers.

Getting help

If you need help, please ask a question on the JuMP community forum.

If you have a reproducible example of a bug, please open a GitHub issue.

License

HiGHS.jl is licensed under the MIT License.

The underlying solver, ERGO-Code/HiGHS, is licensed under the MIT license.

Installation

Install HiGHS as follows:

import Pkg
+

HiGHS.jl

Build Status codecov

HiGHS.jl is a wrapper for the HiGHS solver.

It has two components:

Affiliation

This wrapper is maintained by the JuMP community and is not an official project of the HiGHS developers.

Getting help

If you need help, please ask a question on the JuMP community forum.

If you have a reproducible example of a bug, please open a GitHub issue.

License

HiGHS.jl is licensed under the MIT License.

The underlying solver, ERGO-Code/HiGHS, is licensed under the MIT license.

Installation

Install HiGHS as follows:

import Pkg
 Pkg.add("HiGHS")

In addition to installing the HiGHS.jl package, this will also download and install the HiGHS binaries. You do not need to install HiGHS separately.

To use a custom binary, read the Custom solver binaries section of the JuMP documentation.

Use with JuMP

To use HiGHS with JuMP, use HiGHS.Optimizer:

using JuMP, HiGHS
 model = Model(HiGHS.Optimizer)
 # Set options as needed, for example:
@@ -11,4 +11,4 @@
 set_attribute(model, "time_limit", 60.0)

MathOptInterface API

The HiGHS optimizer supports the following constraints and attributes.

List of supported objective functions:

List of supported variable types:

List of supported constraint types:

List of supported model attributes:

Options

See the HiGHS documentation for a full list of the available options.

C API

The C API can be accessed via HiGHS.Highs_xxx functions, where the names and arguments are identical to the C API.

Threads

HiGHS uses a global scheduler that is shared between threads.

Before changing the number of threads using MOI.Threads(), you must call Highs_resetGlobalScheduler(1):

using JuMP, HiGHS
 model = Model(HiGHS.Optimizer)
 Highs_resetGlobalScheduler(1)
-set_attribute(model, MOI.NumberOfThreads(), 1)

If modifying the number of HiGHS threads across different Julia threads, be sure to read the docstring of Highs_resetGlobalScheduler. In particular, resetting the scheduler is not thread-safe.

+set_attribute(model, MOI.NumberOfThreads(), 1)

If modifying the number of HiGHS threads across different Julia threads, be sure to read the docstring of Highs_resetGlobalScheduler. In particular, resetting the scheduler is not thread-safe.

diff --git a/dev/packages/Hypatia/index.html b/dev/packages/Hypatia/index.html index 980bc63e239..04171508735 100644 --- a/dev/packages/Hypatia/index.html +++ b/dev/packages/Hypatia/index.html @@ -42,4 +42,4 @@ volume={15}, pages={53--101}, doi={https://doi.org/10.1007/s12532-022-00226-0} -} +} diff --git a/dev/packages/InfiniteOpt/index.html b/dev/packages/InfiniteOpt/index.html index ca2e39ad627..1150abc8569 100644 --- a/dev/packages/InfiniteOpt/index.html +++ b/dev/packages/InfiniteOpt/index.html @@ -12,4 +12,4 @@ doi = {https://doi.org/10.1016/j.compchemeng.2021.107567}, url = {https://www.sciencedirect.com/science/article/pii/S0098135421003458}, author = {Joshua L. Pulsipher and Weiqi Zhang and Tyler J. Hongisto and Victor M. Zavala}, -}

A pre-print version is freely available through arXiv.

+}

A pre-print version is freely available through arXiv.

diff --git a/dev/packages/Ipopt/index.html b/dev/packages/Ipopt/index.html index 38a25da97e2..03dfd58afaf 100644 --- a/dev/packages/Ipopt/index.html +++ b/dev/packages/Ipopt/index.html @@ -124,4 +124,4 @@ LinearAlgebra.BLAS.lbt_forward(liblapack32) using Ipopt

AppleAccelerate

If you are using macOS ≥ v13.4 and you have AppleAccelerate.jl installed, add using AppleAccelerate to your code:

using AppleAccelerate
 using Ipopt

Display backends

Check what backends are loaded using:

import LinearAlgebra
-LinearAlgebra.BLAS.lbt_get_config()
+LinearAlgebra.BLAS.lbt_get_config() diff --git a/dev/packages/Juniper/index.html b/dev/packages/Juniper/index.html index 6487e3f3365..8c6adfc8a85 100644 --- a/dev/packages/Juniper/index.html +++ b/dev/packages/Juniper/index.html @@ -33,4 +33,4 @@ year="2018", publisher="Springer International Publishing", isbn="978-3-319-93031-2" -} +} diff --git a/dev/packages/KNITRO/index.html b/dev/packages/KNITRO/index.html index deef76c472b..f7b981fdecc 100644 --- a/dev/packages/KNITRO/index.html +++ b/dev/packages/KNITRO/index.html @@ -10,4 +10,4 @@ set_attribute(model, "algorithm", 4)

Use with AMPL

To use KNITRO with AmplNLWriter.jl, use KNITRO.amplexe:

using JuMP
 import AmplNLWriter
 import KNITRO
-model = Model(() -> AmplNLWriter.Optimizer(KNITRO.amplexe, ["outlev=3"]))

Use with other packages

A variety of packages extend KNITRO.jl to support other optimization modeling systems. These include:

MathOptInterface API

The Knitro optimizer supports the following constraints and attributes.

List of supported objective functions:

List of supported variable types:

List of supported constraint types:

List of supported model attributes:

Options

A list of available options is provided in the KNITRO reference manual.

Low-level wrapper

The complete C API can be accessed via KNITRO.KN_xx functions, where the names and arguments are identical to the C API.

See the KNITRO documentation for details.

As general rules when converting from Julia to C:

Extensive examples using the C wrapper can be found in examples/.

Breaking changes

KNITRO.jl v0.14.0 introduced a number of breaking changes to the low-level C API. The main changes were:

  1. removing Julia-specific functions like KN_set_param. Use the C API functions like KN_set_int_param and KN_set_double_param_by_name.
  2. removing intermediate methods that tried to make the C API more Julia-like. For example, we have removed the KN_add_var method that returned the index of the variable. There is now only the method from the C API.

If you have trouble updating, please open a GitHub issue.

Multi-threading

Due to limitations in the interaction between Julia and C, KNITRO.jl disables multi-threading if the problem is nonlinear. This will override any options such as par_numthreads that you may have set.

If you are using the low-level API, opt-in to enable multi-threading by calling KN_solve(model.env) instead of KN_solve(model), where model is the value returned by model = KN_new(). Note that calling KN_solve(model.env) is an advanced operation because it requires all callbacks you provide to be threadsafe.

Read GitHub issue #93 for more details.

+model = Model(() -> AmplNLWriter.Optimizer(KNITRO.amplexe, ["outlev=3"]))

Use with other packages

A variety of packages extend KNITRO.jl to support other optimization modeling systems. These include:

MathOptInterface API

The Knitro optimizer supports the following constraints and attributes.

List of supported objective functions:

List of supported variable types:

List of supported constraint types:

List of supported model attributes:

Options

A list of available options is provided in the KNITRO reference manual.

Low-level wrapper

The complete C API can be accessed via KNITRO.KN_xx functions, where the names and arguments are identical to the C API.

See the KNITRO documentation for details.

As general rules when converting from Julia to C:

Extensive examples using the C wrapper can be found in examples/.

Breaking changes

KNITRO.jl v0.14.0 introduced a number of breaking changes to the low-level C API. The main changes were:

  1. removing Julia-specific functions like KN_set_param. Use the C API functions like KN_set_int_param and KN_set_double_param_by_name.
  2. removing intermediate methods that tried to make the C API more Julia-like. For example, we have removed the KN_add_var method that returned the index of the variable. There is now only the method from the C API.

If you have trouble updating, please open a GitHub issue.

Multi-threading

Due to limitations in the interaction between Julia and C, KNITRO.jl disables multi-threading if the problem is nonlinear. This will override any options such as par_numthreads that you may have set.

If you are using the low-level API, opt-in to enable multi-threading by calling KN_solve(model.env) instead of KN_solve(model), where model is the value returned by model = KN_new(). Note that calling KN_solve(model.env) is an advanced operation because it requires all callbacks you provide to be threadsafe.

Read GitHub issue #93 for more details.

diff --git a/dev/packages/Loraine/index.html b/dev/packages/Loraine/index.html index 949d9e596b1..ac4388a2ed8 100644 --- a/dev/packages/Loraine/index.html +++ b/dev/packages/Loraine/index.html @@ -37,4 +37,4 @@ www={https://hal.science/hal-04076509/} note={Preprint hal-04076509} year={2023} -}
+}
diff --git a/dev/packages/MAiNGO/index.html b/dev/packages/MAiNGO/index.html index 2fc905dec82..ec13b77d1a2 100644 --- a/dev/packages/MAiNGO/index.html +++ b/dev/packages/MAiNGO/index.html @@ -94,4 +94,4 @@ findMAiNGO(preferred=MAiNGO.C_API) # switch back to MAiNGO_jll findMAiNGO(preferred=MAiNGO.MAINGO_JLL)

The findMAiNGO() function takes several optional arguments, which can be passed as keyword-arguments:

For example, to use the C-API at a new location, one could call:

using MAiNGO
-findMAiNGO(preferred=MAiNGO.C_API, c_api="path\\to\\c\\api\\shared_parser.dll")

Currently working:

Restrictions compared to using the Python or C++ interface

It is assumed that all variables are bounded. This interface assumes that integer variables are bounded between -1e6 and 1e6. For real variables these bounds are -1e8 and 1e8.

Other functionality such as special support for growing datasets or MPI parallelization is not currently supported via this wrapper. Additionally, constraint formulations are simply passed from their representation in JuMP/MathOptInterface to MAiNGO. As such, there is no way to make use of advanced techniques such as defining constraints that are only used for the relaxations, using special relaxations for functions used in thermodynamics and process engineering or formulating reduced space formulations.

Tests

A subset of test cases for MathOptInterface solvers can be run by running the script ./test/runtests.jl. The current release was tested in the following combinations:

+findMAiNGO(preferred=MAiNGO.C_API, c_api="path\\to\\c\\api\\shared_parser.dll")

Currently working:

Restrictions compared to using the Python or C++ interface

It is assumed that all variables are bounded. This interface assumes that integer variables are bounded between -1e6 and 1e6. For real variables these bounds are -1e8 and 1e8.

Other functionality such as special support for growing datasets or MPI parallelization is not currently supported via this wrapper. Additionally, constraint formulations are simply passed from their representation in JuMP/MathOptInterface to MAiNGO. As such, there is no way to make use of advanced techniques such as defining constraints that are only used for the relaxations, using special relaxations for functions used in thermodynamics and process engineering or formulating reduced space formulations.

Tests

A subset of test cases for MathOptInterface solvers can be run by running the script ./test/runtests.jl. The current release was tested in the following combinations:

diff --git a/dev/packages/MadNLP/index.html b/dev/packages/MadNLP/index.html index 91bb1be2fec..75bad61e6c3 100644 --- a/dev/packages/MadNLP/index.html +++ b/dev/packages/MadNLP/index.html @@ -43,4 +43,4 @@ author={Shin, Sungho and Coffrin, Carleton and Sundar, Kaarthik and Zavala, Victor M}, journal={arXiv preprint arXiv:2010.02404}, year={2020} -}

Supporting MadNLP.jl

+}

Supporting MadNLP.jl

diff --git a/dev/packages/Manopt/index.html b/dev/packages/Manopt/index.html index 23125ff23ff..914702f18ae 100644 --- a/dev/packages/Manopt/index.html +++ b/dev/packages/Manopt/index.html @@ -30,4 +30,4 @@ TITLE = {Manifolds.jl: An Extensible Julia Framework for Data Analysis on Manifolds}, VOLUME = {49}, YEAR = {2023} -}

as well. Note that all citations are in BibLaTeX format.

Manopt.jl belongs to the Manopt family:

Did you use Manopt.jl somewhere? Let us know! We'd love to collect those here as well.

+}

as well. Note that all citations are in BibLaTeX format.

Manopt.jl belongs to the Manopt family:

Did you use Manopt.jl somewhere? Let us know! We'd love to collect those here as well.

diff --git a/dev/packages/MathOptAI/index.html b/dev/packages/MathOptAI/index.html index 91f96c6c41a..68295971c6c 100644 --- a/dev/packages/MathOptAI/index.html +++ b/dev/packages/MathOptAI/index.html @@ -31,4 +31,4 @@ moai_SoftMax[7] moai_SoftMax[8] moai_SoftMax[9] - moai_SoftMax[10]

Documentation

Documentation is available at https://lanl-ansi.github.io/MathOptAI.jl.

Getting help

For help, questions, comments, and suggestions, please open a GitHub issue.

Inspiration

This project is mainly inspired by two existing projects:

Other works, from which we took less inspiration, include:

The 2024 paper of López-Flores et al. is an excellent summary of the state of the field at the time that we started development of MathOptAI.

López-Flores, F.J., Ramírez-Márquez, C., Ponce-Ortega J.M. (2024). Process Systems Engineering Tools for Optimization of Trained Machine Learning Models: Comparative and Perspective. Industrial & Engineering Chemistry Research, 63(32), 13966-13979. DOI: 10.1021/acs.iecr.4c00632

+ moai_SoftMax[10]

Documentation

Documentation is available at https://lanl-ansi.github.io/MathOptAI.jl.

Getting help

For help, questions, comments, and suggestions, please open a GitHub issue.

Inspiration

This project is mainly inspired by two existing projects:

Other works, from which we took less inspiration, include:

The 2024 paper of López-Flores et al. is an excellent summary of the state of the field at the time that we started development of MathOptAI.

López-Flores, F.J., Ramírez-Márquez, C., Ponce-Ortega J.M. (2024). Process Systems Engineering Tools for Optimization of Trained Machine Learning Models: Comparative and Perspective. Industrial & Engineering Chemistry Research, 63(32), 13966-13979. DOI: 10.1021/acs.iecr.4c00632

diff --git a/dev/packages/MathOptSymbolicAD/index.html b/dev/packages/MathOptSymbolicAD/index.html index 6551d7ba184..cb0d50fc23f 100644 --- a/dev/packages/MathOptSymbolicAD/index.html +++ b/dev/packages/MathOptSymbolicAD/index.html @@ -18,4 +18,4 @@ optimize!(model)

Background

MathOptSymbolicAD is inspired by Hassan Hijazi's work on coin-or/gravity, a high-performance algebraic modeling language in C++.

Hassan made the following observations:

The symbolic differentiation approach of Gravity works well when the problem is large with few unique constraints. For example, a model like:

model = Model()
 @variable(model, 0 <= x[1:10_000] <= 1)
 @constraint(model, [i=1:10_000], sin(x[i]) <= 1)
-@objective(model, Max, sum(x))

is ideal, because although the Jacobian matrix has 10,000 rows, we can compute the derivative of sin(x[i]) as cos(x[i]), and then fill in the Jacobian by evaluating the derivative function instead of having to differentiate 10,000 expressions.

The symbolic differentiation approach of Gravity works poorly if there are a large number of unique constraints in the model (which would require a lot of expressions to be symbolically differentiated), or if the nonlinear functions contain a large number of nonlinear terms (which would make the symbolic derivative expensive to compute).

For more details, see Oscar's JuMP-dev 2022 talk, although note that the syntax has changed since the original recording.
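
To opt in to the symbolic backend from JuMP, the usage can be sketched as follows. This is a minimal sketch, assuming Ipopt as the solver and the MOI.AutomaticDifferentiationBackend attribute together with MathOptSymbolicAD.DefaultBackend; check the MathOptSymbolicAD documentation for the exact, current names.

using JuMP, Ipopt
import MathOptSymbolicAD
model = Model(Ipopt.Optimizer)
@variable(model, 0 <= x[1:10_000] <= 1)
@constraint(model, [i = 1:10_000], sin(x[i]) <= 1)
@objective(model, Max, sum(x))
# Ask MOI to differentiate the model with the symbolic backend:
set_attribute(
    model,
    MOI.AutomaticDifferentiationBackend(),
    MathOptSymbolicAD.DefaultBackend(),
)
optimize!(model)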

+@objective(model, Max, sum(x))

diff --git a/dev/packages/MiniZinc/index.html b/dev/packages/MiniZinc/index.html index 79b28218380..8ab75b7a4ae 100644 --- a/dev/packages/MiniZinc/index.html +++ b/dev/packages/MiniZinc/index.html @@ -53,4 +53,4 @@ @constraint(model, x in MOI.AllDifferent(3)) @objective(model, Max, sum(i * x[i] for i in 1:3)) optimize!(model) -@show value.(x)

MathOptInterface API

The MiniZinc Optimizer{T} supports the following constraints and attributes.

List of supported objective functions:

List of supported variable types:

List of supported constraint types:

List of supported model attributes:

Options

Set options using MOI.RawOptimizerAttribute in MOI or set_attribute in JuMP.
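
For example, a hedged sketch (the "chuffed" solver string and the model_filename option are assumptions based on typical MiniZinc.jl usage; consult the options list below for what your installation supports):

using JuMP, MiniZinc
model = Model(() -> MiniZinc.Optimizer{Int}("chuffed"))
# Write the generated MiniZinc model to a file for inspection (assumed option name).
set_attribute(model, "model_filename", "model.mzn")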

MiniZinc.jl supports the following options:

+@show value.(x)

diff --git a/dev/packages/MosekTools/index.html b/dev/packages/MosekTools/index.html index 53542e51e1b..63397a6d3df 100644 --- a/dev/packages/MosekTools/index.html +++ b/dev/packages/MosekTools/index.html @@ -7,4 +7,4 @@ using MosekTools model = Model(Mosek.Optimizer) set_attribute(model, "QUIET", true) -set_attribute(model, "INTPNT_CO_TOL_DFEAS", 1e-7)

Options

The parameter QUIET is a special parameter that, when set to true, disables all Mosek printing output.

All other parameters can be found in the Mosek documentation.

Note that the prefixes MSK_IPAR_ (for integer parameters), MSK_DPAR_ (for floating point parameters), and MSK_SPAR_ (for string parameters) are optional. If a prefix is not given, it is inferred from the type of the value. For example, in the example above, because 1e-7 is a floating point number, the parameter name used is MSK_DPAR_INTPNT_CO_TOL_DFEAS.
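
For example, both of the following calls set the same Mosek parameter; the second spells out the prefix that the first infers from the Float64 value:

using JuMP, MosekTools
model = Model(Mosek.Optimizer)
set_attribute(model, "INTPNT_CO_TOL_DFEAS", 1e-7)           # prefix inferred as MSK_DPAR_
set_attribute(model, "MSK_DPAR_INTPNT_CO_TOL_DFEAS", 1e-7)  # explicit prefix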

+set_attribute(model, "INTPNT_CO_TOL_DFEAS", 1e-7)

diff --git a/dev/packages/MultiObjectiveAlgorithms/index.html b/dev/packages/MultiObjectiveAlgorithms/index.html index 2e51eefcfb6..7146fd0caa1 100644 --- a/dev/packages/MultiObjectiveAlgorithms/index.html +++ b/dev/packages/MultiObjectiveAlgorithms/index.html @@ -9,4 +9,4 @@ import MultiObjectiveAlgorithms as MOA model = JuMP.Model(() -> MOA.Optimizer(HiGHS.Optimizer)) set_attribute(model, MOA.Algorithm(), MOA.Dichotomy()) -set_attribute(model, MOA.SolutionLimit(), 4)

Replace HiGHS.Optimizer with an optimizer capable of solving a single-objective instance of your optimization problem.

You may set additional optimizer attributes; the supported attributes depend on the choice of solution algorithm.

Algorithm

Set the algorithm using the MOA.Algorithm() attribute.

The value must be one of the algorithms supported by MOA:

Consult their docstrings for details.

Other optimizer attributes

There are a number of optimizer attributes supported by the algorithms in MOA.

Each algorithm supports only a subset of the attributes. Consult the algorithm's docstring for details on which attributes it supports, and how it uses them in the solution process.
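
Whichever algorithm and attributes you choose, the solutions that MOA finds are exposed through JuMP's standard multiple-results API. A minimal sketch, using the MOA.Dichotomy algorithm from above on a toy bi-objective model:

using JuMP, HiGHS
import MultiObjectiveAlgorithms as MOA
model = Model(() -> MOA.Optimizer(HiGHS.Optimizer))
set_attribute(model, MOA.Algorithm(), MOA.Dichotomy())
@variable(model, 0 <= x[1:2] <= 1)
@objective(model, Min, [x[1] + x[2], x[1] - x[2]])
optimize!(model)
for i in 1:result_count(model)
    println(objective_value(model; result = i), " at x = ", value.(x; result = i))
end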

+set_attribute(model, MOA.SolutionLimit(), 4)

diff --git a/dev/packages/NEOSServer/index.html b/dev/packages/NEOSServer/index.html index 9ff4438fea4..5073c368f84 100644 --- a/dev/packages/NEOSServer/index.html +++ b/dev/packages/NEOSServer/index.html @@ -27,4 +27,4 @@ results = neos_getFinalResults(server, job)

Use with JuMP

Use NEOSServer.jl with JuMP as follows:

using JuMP, NEOSServer
 model = Model() do
     return NEOSServer.Optimizer(; email = "me@mydomain.com", solver = "Ipopt")
-end

Note: NEOSServer.Optimizer is limited to the following solvers:

NEOS Limits

NEOS currently limits jobs to an 8 hour time limit, 3 GB of memory, and a 16 MB submission file. If your model exceeds these limits, NEOSServer.jl may be unable to return useful information to the user.

+end

diff --git a/dev/packages/NLopt/index.html b/dev/packages/NLopt/index.html index c48c8919f60..5d335633041 100644 --- a/dev/packages/NLopt/index.html +++ b/dev/packages/NLopt/index.html @@ -120,4 +120,4 @@ opt = Opt(:LD_MMA, 2) # Define problem solutions[i] = optimize(opt, rand(2)) -end

Author

This module was initially written by Steven G. Johnson, with subsequent contributions by several other authors (see the git history).

+end

diff --git a/dev/packages/OSQP/index.html b/dev/packages/OSQP/index.html index 7e2d2941d2d..da4d028e9cc 100644 --- a/dev/packages/OSQP/index.html +++ b/dev/packages/OSQP/index.html @@ -6,4 +6,4 @@

OSQP.jl

Build Status codecov.io

OSQP.jl is a Julia wrapper for OSQP: the Operator Splitting QP Solver.

License

OSQP.jl is licensed under the Apache-2.0 license.

The upstream solver, osqp/osqp is also licensed under the Apache-2.0 license.

Installation

Install OSQP.jl using the Julia package manager

import Pkg
 Pkg.add("OSQP")

Problem class

The OSQP (Operator Splitting Quadratic Program) solver is a numerical optimization package for solving problems in the form

minimize        0.5 x' P x + q' x
 
-subject to      l <= A x <= u

where x in R^n is the optimization variable. The objective function is defined by a positive semidefinite matrix P in S^n_+ and vector q in R^n. The linear constraints are defined by matrix A in R^{m x n} and vectors l in R^m U {-inf}^m, u in R^m U {+inf}^m.
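
A small JuMP model of this form, as a hedged sketch (JuMP translates the variable bounds and the linear constraint into the l <= A x <= u block for OSQP):

using JuMP, OSQP
model = Model(OSQP.Optimizer)
@variable(model, -1 <= x[1:2] <= 1)
@constraint(model, x[1] + x[2] >= -1)
@objective(model, Min, 0.5 * (x[1]^2 + x[2]^2) + x[1])  # 0.5 x'Px + q'x with P = I, q = [1, 0]
optimize!(model)
value.(x)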

Documentation

Detailed documentation is available at https://osqp.org/.

+subject to l <= A x <= u

diff --git a/dev/packages/Optim/index.html b/dev/packages/Optim/index.html index 53cf0aec7c4..a41b93fb2a1 100644 --- a/dev/packages/Optim/index.html +++ b/dev/packages/Optim/index.html @@ -105,4 +105,4 @@ number = {24}, pages = {615}, doi = {10.21105/joss.00615} -} +} diff --git a/dev/packages/PATHSolver/index.html b/dev/packages/PATHSolver/index.html index 93c06e69cb1..7e5307a4c61 100644 --- a/dev/packages/PATHSolver/index.html +++ b/dev/packages/PATHSolver/index.html @@ -165,4 +165,4 @@ 0.8 1.2

Thread safety

PATH is not thread-safe and there are no known work-arounds. Do not run it in parallel using Threads.@threads. See issue #62 for more details.

Factorization methods

By default, PATHSolver.jl will download the LUSOL shared library. To use LUSOL, set the following options:

model = Model(PATHSolver.Optimizer)
 set_optimizer_attribute(model, "factorization_method", "blu_lusol")
-set_optimizer_attribute(model, "factorization_library_name", PATHSolver.LUSOL_LIBRARY_PATH)

To use factorization_method umfpack, you will need the umfpack shared library, which is available directly from the developers of that code for academic use.

Manual installation

By default PATHSolver.jl will download a copy of the libpath library. If you already have one installed and want to use that, set the PATH_JL_LOCATION environment variable to point to the libpath50.xx library.

+set_optimizer_attribute(model, "factorization_library_name", PATHSolver.LUSOL_LIBRARY_PATH)

diff --git a/dev/packages/Pajarito/index.html b/dev/packages/Pajarito/index.html index eff081344c2..d0f4eedb9dc 100644 --- a/dev/packages/Pajarito/index.html +++ b/dev/packages/Pajarito/index.html @@ -27,4 +27,4 @@ pages={249--293}, year={2020}, publisher={Springer} -}

Note this paper describes a legacy MathProgBase version of Pajarito, which is available on the mathprogbase branch of this repository. Starting with version v0.8.0, Pajarito supports MathOptInterface instead of MathProgBase.

+}

diff --git a/dev/packages/ParametricOptInterface/index.html b/dev/packages/ParametricOptInterface/index.html index 90755a1ca81..8c470f82e3b 100644 --- a/dev/packages/ParametricOptInterface/index.html +++ b/dev/packages/ParametricOptInterface/index.html @@ -13,4 +13,4 @@ @objective(model, Min, 2x) optimize!(model) MOI.set(model, POI.ParameterValue(), p, 2.0) -optimize!(model)

GSOC2020

ParametricOptInterface began as a NumFOCUS sponsored Google Summer of Code (2020) project.

+optimize!(model)

diff --git a/dev/packages/Pavito/index.html b/dev/packages/Pavito/index.html index b2a820e4843..dfdc2b4c4e4 100644 --- a/dev/packages/Pavito/index.html +++ b/dev/packages/Pavito/index.html @@ -13,4 +13,4 @@ "cont_solver" => optimizer_with_attributes(Ipopt.Optimizer, "print_level" => 0), ), -)

The algorithm implemented by Pavito itself is relatively simple; most of the hard work is performed by the MILP solver passed as mip_solver and the NLP solver passed as cont_solver.

The performance of Pavito depends on these two types of solvers.

For better performance, you should use a commercial MILP solver such as CPLEX or Gurobi.

Options

The following optimizer attributes can be set on a Pavito.Optimizer to modify its behavior:

Pavito is not yet numerically robust and may require tuning of parameters to improve convergence.

If the default parameters don't work for you, please let us know by opening an issue.

For improved Pavito performance, MILP solver integrality tolerance and feasibility tolerances should typically be tightened, for example to 1e-8.
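
For example, a hedged sketch that tightens the tolerances of HiGHS when it is used as the MILP solver; the option names "mip_feasibility_tolerance" and "primal_feasibility_tolerance" are assumptions, so check your MILP solver's documentation for its tolerance names:

using JuMP, Pavito, HiGHS, Ipopt
model = Model(
    optimizer_with_attributes(
        Pavito.Optimizer,
        "mip_solver" => optimizer_with_attributes(
            HiGHS.Optimizer,
            "output_flag" => false,
            "mip_feasibility_tolerance" => 1e-8,     # assumed option name
            "primal_feasibility_tolerance" => 1e-8,  # assumed option name
        ),
        "cont_solver" =>
            optimizer_with_attributes(Ipopt.Optimizer, "print_level" => 0),
    ),
)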

Bug reports and support

Please report any issues via the GitHub issue tracker. All types of issues are welcome and encouraged; this includes bug reports, documentation typos, feature requests, etc. The Optimization (Mathematical) category on Discourse is appropriate for general discussion.

+)

diff --git a/dev/packages/Percival/index.html b/dev/packages/Percival/index.html index f68b16b433b..2f00a4ed2f2 100644 --- a/dev/packages/Percival/index.html +++ b/dev/packages/Percival/index.html @@ -22,4 +22,4 @@ [1.0], [1.0], ) -output = percival(nlp, verbose = 1)

Bug reports and discussions

If you think you found a bug, feel free to open an issue. Focused suggestions and requests can also be opened as issues. Before opening a pull request, start an issue or a discussion on the topic, please.

If you want to ask a question not suited for a bug report, feel free to start a discussion here. This forum is for general discussion about this repository and the JuliaSmoothOptimizers, so questions about any of our packages are welcome.

+output = percival(nlp, verbose = 1)

diff --git a/dev/packages/PiecewiseLinearOpt/index.html b/dev/packages/PiecewiseLinearOpt/index.html index 8891eac6f1c..ce3729b5cef 100644 --- a/dev/packages/PiecewiseLinearOpt/index.html +++ b/dev/packages/PiecewiseLinearOpt/index.html @@ -41,4 +41,4 @@ (u, v) -> exp(u + v); method = :DisaggLogarithmic, ) -@objective(model, Min, z)

Methods

Supported univariate formulations:

Supported bivariate formulations for entire constraint:

You can also use any univariate formulation for bivariate functions. They will be used to impose two axis-aligned SOS2 constraints, along with the "6-stencil" formulation for the triangle selection portion of the constraint. See the associated paper for more details. In particular, the following are also acceptable bivariate formulation choices:

+@objective(model, Min, z)

diff --git a/dev/packages/Plasmo/index.html b/dev/packages/Plasmo/index.html index dd9fec16f65..00b2fef3c09 100644 --- a/dev/packages/Plasmo/index.html +++ b/dev/packages/Plasmo/index.html @@ -50,4 +50,4 @@ volume = {125}, year = {2019}, doi = {10.1016/j.compchemeng.2019.03.009} -}

A pre-print of this paper can be found here

+}

diff --git a/dev/packages/PolyJuMP/index.html b/dev/packages/PolyJuMP/index.html index 475feb58dec..6d8f96a21b5 100644 --- a/dev/packages/PolyJuMP/index.html +++ b/dev/packages/PolyJuMP/index.html @@ -17,4 +17,4 @@ model = Model(optimizer_with_attributes( PolyJuMP.KKT.Optimizer, "solver" => HomotopyContinuation.SemialgebraicSetsHCSolver(), -))

Documentation

Documentation for PolyJuMP.jl is included in the documentation for SumOfSquares.jl.

+))

diff --git a/dev/packages/ProxSDP/index.html b/dev/packages/ProxSDP/index.html index f3119395591..90a2361f7b6 100644 --- a/dev/packages/ProxSDP/index.html +++ b/dev/packages/ProxSDP/index.html @@ -56,4 +56,4 @@ publisher = {Taylor & Francis}, doi = {10.1080/02331934.2020.1823387}, URL = {https://doi.org/10.1080/02331934.2020.1823387} -}

The preprint version of the paper can be found here.

Disclaimer

ROAD MAP

+}

diff --git a/dev/packages/SCIP/index.html b/dev/packages/SCIP/index.html index 9ff5a705a02..f2891208641 100644 --- a/dev/packages/SCIP/index.html +++ b/dev/packages/SCIP/index.html @@ -14,4 +14,4 @@ julia> Pkg.build("SCIP")

Use with JuMP

Use SCIP with JuMP as follows:

using JuMP, SCIP
 model = Model(SCIP.Optimizer)
 set_attribute(model, "display/verblevel", 0)
-set_attribute(model, "limits/gap", 0.05)

Options

See the SCIP documentation for a list of supported options.

MathOptInterface API

The SCIP optimizer supports the following constraints and attributes.

List of supported objective functions:

List of supported variable types:

List of supported constraint types:

List of supported model attributes:

Design considerations

Wrapping the public API

All of the public API methods are wrapped and available within the SCIP package. This includes the scip_*.h and pub_*.h headers that are collected in scip.h, as well as all default constraint handlers (cons_*.h.)

The wrapped functions do not transform any data structures and work on the raw pointers (for example, SCIP* in C, Ptr{SCIP_} in Julia). Convenience wrapper functions based on Julia types are added as needed.

Memory management

Programming with SCIP requires dealing with variable and constraint objects that use reference counting for memory management.

The SCIP.Optimizer wrapper type collects lists of SCIP_VAR* and SCIP_CONS* under the hood, and it releases all references when it is garbage collected itself (via finalize).

When adding a variable (add_variable) or a constraint (add_linear_constraint), an integer index is returned. This index can be used to retrieve the SCIP_VAR* or SCIP_CONS* pointer via get_var and get_cons respectively.

Supported nonlinear operators

Supported operators in nonlinear expressions are as follows:

+set_attribute(model, "limits/gap", 0.05)

diff --git a/dev/packages/SCS/index.html b/dev/packages/SCS/index.html index 499f604ec7e..16ba5ebb184 100644 --- a/dev/packages/SCS/index.html +++ b/dev/packages/SCS/index.html @@ -53,4 +53,4 @@ julia> SCS.is_available(SCS.GpuIndirectSolver) true

The GpuIndirectSolver is available on Linux x86_64 platform only.

Low-level wrapper

SCS.jl provides a low-level interface to solve a problem directly, without interfacing through MathOptInterface.

This is an advanced interface with a risk of incorrect usage. For new users, we recommend that you use the JuMP or Convex interfaces instead.
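
For reference, the recommended JuMP route looks like this; a minimal sketch with an illustrative second-order cone problem:

using JuMP, SCS
model = Model(SCS.Optimizer)
@variable(model, x[1:2])
@objective(model, Min, x[1] + x[2])
@constraint(model, [1.0; x] in SecondOrderCone())  # constrains the 2-norm of x to be at most 1
optimize!(model)
value.(x)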

SCS solves a problem of the form:

minimize        1/2 * x' * P * x + c' * x
 subject to      A * x + s = b
-                s in K

where K is a product cone of:

To solve this problem with SCS, call SCS.scs_solve; see the docstring for details.

+ s in K

diff --git a/dev/packages/SDDP/index.html b/dev/packages/SDDP/index.html index f7f3b487ed6..808ff23d5a2 100644 --- a/dev/packages/SDDP/index.html +++ b/dev/packages/SDDP/index.html @@ -3,4 +3,4 @@ function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-0RZ8X3D3D0', {'page_path': location.pathname + location.search + location.hash}); -
+
diff --git a/dev/packages/SDPA/index.html b/dev/packages/SDPA/index.html index b61bb690c1a..17ba8a5b22b 100644 --- a/dev/packages/SDPA/index.html +++ b/dev/packages/SDPA/index.html @@ -13,4 +13,4 @@ set_attribute(model, "Mode", SDPA.PARAMETER_STABLE_BUT_SLOW)

Note that the parameters are set in the order they are given, so you can set a mode and then modify parameters from this mode.

using JuMP, SDPA
 model = Model(SDPA.Optimizer)
 set_attribute(model, "Mode", SDPA.PARAMETER_STABLE_BUT_SLOW)
-set_attribute(model, "MaxIteration", 100)

The choice of parameter mode has a large impact on the performance and stability of SDPA, and not necessarily in the way implied by the names of the modes; for example, PARAMETER_UNSTABLE_BUT_FAST can be more stable than the other modes for some problems. You should try each mode to see how it performs on your specific problem. See SDPA.jl#17 for more details.
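
A hedged sketch of trying each mode on the same problem; PARAMETER_DEFAULT is an assumption here, while the other two modes are named above:

using JuMP, SDPA
for mode in (
    SDPA.PARAMETER_DEFAULT,            # assumed name of the default mode
    SDPA.PARAMETER_UNSTABLE_BUT_FAST,
    SDPA.PARAMETER_STABLE_BUT_SLOW,
)
    model = Model(SDPA.Optimizer)
    set_attribute(model, "Mode", mode)
    # Build the same model here, call optimize!(model), and compare the results.
end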

+set_attribute(model, "MaxIteration", 100)

diff --git a/dev/packages/SDPLR/index.html b/dev/packages/SDPLR/index.html index fed52a22813..fd0bcc63b06 100644 --- a/dev/packages/SDPLR/index.html +++ b/dev/packages/SDPLR/index.html @@ -50,4 +50,4 @@ sigma *= 2 end lambdaupdate = 0 -end +end diff --git a/dev/packages/SDPNAL/index.html b/dev/packages/SDPNAL/index.html index 52bc57ee00a..b415f813c04 100644 --- a/dev/packages/SDPNAL/index.html +++ b/dev/packages/SDPNAL/index.html @@ -18,4 +18,4 @@ '/path/to/SDPNALv1.0/solver:', ... '/path/to/SDPNALv1.0/solver_main_default:', ... '/path/to/SDPNALv1.0/util:', ... -% (...)

If you have SDPT3 in addition to SDPNAL in the MATLAB path (that is, the toolbox/local/pathdef.m file), then you might have issues because both solvers define a validate function, and this might make SDPNAL call SDPT3's validate function instead of its own.

+% (...)

diff --git a/dev/packages/SDPT3/index.html b/dev/packages/SDPT3/index.html index e7c2388d435..a89d73408ea 100644 --- a/dev/packages/SDPT3/index.html +++ b/dev/packages/SDPT3/index.html @@ -29,4 +29,4 @@ julia> MATLAB.restoredefaultpath() -julia> MATLAB.mat"savepath" +julia> MATLAB.mat"savepath" diff --git a/dev/packages/SeDuMi/index.html b/dev/packages/SeDuMi/index.html index 447902d4917..a79df412be5 100644 --- a/dev/packages/SeDuMi/index.html +++ b/dev/packages/SeDuMi/index.html @@ -17,4 +17,4 @@ MATLAB.mat"install_sedumi" end -julia> MATLAB.mat"savepath" +julia> MATLAB.mat"savepath" diff --git a/dev/packages/SumOfSquares/index.html b/dev/packages/SumOfSquares/index.html index 83a1ab1e691..a0d31710216 100644 --- a/dev/packages/SumOfSquares/index.html +++ b/dev/packages/SumOfSquares/index.html @@ -4,4 +4,4 @@ gtag('js', new Date()); gtag('config', 'G-0RZ8X3D3D0', {'page_path': location.pathname + location.search + location.hash});

SumOfSquares.jl

Build Status codecov

SumOfSquares.jl is a JuMP extension that, when used in conjunction with MultivariatePolynomials and PolyJuMP, implements a sum of squares reformulation for polynomial optimization.
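
As a hedged sketch of what that looks like, assuming DynamicPolynomials for the @polyvar macro and CSDP as the semidefinite solver (any SDP solver supported by JuMP should work):

using JuMP, SumOfSquares, DynamicPolynomials
import CSDP
@polyvar x y
p = 2x^2 + 2x * y + y^2  # equals x^2 + (x + y)^2, so it is a sum of squares
model = SOSModel(CSDP.Optimizer)
@constraint(model, p >= 0)  # in an SOSModel, this asks for a sum-of-squares certificate of p
optimize!(model)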

License

SumOfSquares.jl is licensed under the MIT license.

Installation

Install SumOfSquares using Pkg.add:

import Pkg
-Pkg.add("SumOfSquares")

Documentation

See https://jump.dev/SumOfSquares.jl/stable for the most recently tagged version of the documentation.

See https://jump.dev/SumOfSquares.jl/dev for the in-development version of the documentation.

Presentations

Some presentations on, or using, SumOfSquares (see blegat/SumOfSquaresSlides for the source code of the presentations):

Citing

See CITATION.bib.

+Pkg.add("SumOfSquares")

diff --git a/dev/packages/Tulip/index.html b/dev/packages/Tulip/index.html index 15f74b40fb0..304b9ee6d3d 100644 --- a/dev/packages/Tulip/index.html +++ b/dev/packages/Tulip/index.html @@ -28,4 +28,4 @@ language = {en}, url = {https://doi.org/10.1007/s12532-020-00200-8}, urldate = {2021-03-07}, -} +} diff --git a/dev/packages/Xpress/index.html b/dev/packages/Xpress/index.html index d39c711baa9..ea95b964f9a 100644 --- a/dev/packages/Xpress/index.html +++ b/dev/packages/Xpress/index.html @@ -58,4 +58,4 @@ @test termination_status(model) == MOI.OPTIMAL @test primal_status(model) == MOI.FEASIBLE_POINT @test value(x) == 1 -@test value(y) == 2

Environment variables

C API

The C API can be accessed via Xpress.Lib.XPRSxx functions, where the names and arguments are identical to the C API.

See the Xpress documentation for details.

Documentation

For more information, consult the FICO optimizer manual.

+@test value(y) == 2

diff --git a/dev/packages/solvers/index.html b/dev/packages/solvers/index.html index caae8af89ad..dfc88595044 100644 --- a/dev/packages/solvers/index.html +++ b/dev/packages/solvers/index.html @@ -3,4 +3,4 @@ function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-0RZ8X3D3D0', {'page_path': location.pathname + location.search + location.hash}); -

Introduction

This section of the documentation contains brief documentation for some of the solvers that JuMP supports. The list of solvers is not exhaustive, but instead is intended to help you discover commonly used solvers.

Affiliation

Packages beginning with jump-dev/ are developed and maintained by the JuMP developers. In many cases, these packages wrap external solvers that are not developed by the JuMP developers and, while the Julia packages are all open-source, in some cases the solvers themselves are closed source commercial products.

Packages that do not begin with jump-dev/ are developed independently. The developers of these packages requested or consented to the inclusion of their README contents in the JuMP documentation for the benefit of users.

Adding new solvers

Written a solver? Add it to this section of the JuMP documentation by making a pull request to the docs/packages.toml file.

+

diff --git a/dev/release_notes/index.html b/dev/release_notes/index.html index e80e4cd6f3e..895ae1de52f 100644 --- a/dev/release_notes/index.html +++ b/dev/release_notes/index.html @@ -3,13 +3,13 @@ function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-0RZ8X3D3D0', {'page_path': location.pathname + location.search + location.hash}); -

Release notes

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

Version 1.23.4 (November 8, 2024)

Fixed

  • Fixed UnsupportedNonlinearOperator error for the single argument LinearAlgebra.norm (#3864)
  • Fixed printing MOI.Interval with MIME"text/latex" (#3866)

Other

Version 1.23.3 (October 21, 2024)

Fixed

Other

Version 1.23.2 (September 13, 2024)

Fixed

  • Fixed an illegal simplification in MA.operate!! for NonlinearExpr (#3826)

Other

Version 1.23.1 (August 30, 2024)

Fixed

  • Fixed a bug with indicator constraints and the in set syntax (#3813)

Other

Version 1.23.0 (August 13, 2024)

Added

  • Added set inequality syntax for matrices (#3766)
  • Improved matrix inequality support (#3778) (#3805)

Fixed

  • Fixed a method for calling value on a ::Number (#3776)
  • Fixed querying dual of Symmetric and Hermitian equality constraints (#3797)
  • Fixed read_from_file for coefficient types other than Float64 (#3801)

Other

  • Documentation improvements
    • Fixed missing character in installation instructions (#3777)
    • Added a section on querying the Jacobian (#3779)
    • Clarify that SCIP does not support lazy constraints (#3784)
    • Fixed typo in knapsack.jl (#3792)
    • Added a warning to docs about tolerances in Bin and Int variables (#3794)
    • Clarify where to type installation commands (#3795)
  • Improve error message for common incorrect syntax in constraint macro (#3781)
  • Changed show(::IO, ::GenericModel) to a more informative tree structure (#3803)

Version 1.22.2 (June 17, 2024)

Fixed

  • Fixed printing to omit terms when printing a large array of expressions (#3759)
  • Fixed bug in printing when show is called on an invalid variable or constraint (#3763)

Other

  • Improved error message for unsupported kwargs in variable macro (#3751)
  • Improved error message for unsupported container syntax like x[A][B] (#3756)
  • Docstring improvements (#3758), (#3760), (#3761), (#3767)
  • Added warning to documentation about Y <= X, Set() syntax (#3769)
  • Work-around change on nightly (#3753), (#3754)
  • Improved printing of symmetric matrices when used in constraints (#3768)
  • Fixed a test for upcoming printing change in MOI (#3772)
  • Updated should_i_use.md (#3773)

Version 1.22.1 (May 17, 2024)

Fixed

  • Fixed bug including non-.jl files in src/macros.jl (#3747)

Other

  • Added DSDP to the list of supported solvers (#3745)
  • Updated YALMIP migration guide (#3748)

Version 1.22.0 (May 12, 2024)

Added

  • Added Base.complex(r, i) where r and i may be real-valued variables or affine or quadratic expressions (#3734)
  • Added @force_nonlinear for controlling when affine and quadratic expressions are instead parsed as nonlinear expressions. This can be useful for advanced users in a limited set of circumstances. (#3732)
  • Added support for returning the variable coefficients of a vector-valued constraint via normalized_coefficient. In addition, set_normalized_coefficients has been softly deprecated (no warning is thrown and old code will still work for all future 1.X releases of JuMP) in favor of set_normalized_coefficient. This change was made to unify how we get and set variable coefficients. (#3743)

Fixed

  • Fixed missing promote_operation method that resulted in slow code (#3730)
  • Improved performance of getindex for Containers.DenseAxisArray (#3731)
  • Fixed the error message when the legacy nonlinear API is mixed with the new nonlinear API. In particular, we now uniformly throw an error message when unexpected objects occur in nonlinear expressions. (#3741)

Other

Version 1.21.1 (April 11, 2024)

Fixed

  • Fixed behavior of complex-value related functions like real, imag, conj and abs2 when called on GenericNonlinearExpr. This fixes a method error when calling x' where x is an array of nonlinear expressions. As a related consequence, we now always error when creating nonlinear expressions with complex components. Previously, only some constructors were checked for complex expressions. (#3724)

Other

Version 1.21.0 (March 31, 2024)

Added

  • Added support for matrix inequality constraints with the HermitianPSDCone (#3705)
  • Added batched modification methods for set_normalized_rhs, set_objective_coefficient and set_normalized_coefficient. Using these methods can be more efficient for some solvers (#3716)
  • Added the private constant _CONSTRAINT_LIMIT_FOR_PRINTING, which controls how many constraints are printed to the screen during print(model). The main purpose of this is to prevent large quantities of text being printed when print(model) is accidentally called on a large model. (#3686)

Fixed

  • Changed Containers.SparseAxisArray to use an OrderedDict as the backing data structure. Iterating over the elements in a SparseAxisArray now iterates in the order that the elements were created. Previously, the order was undefined behavior. (#3681)
  • Fixed complex variables for non-Float64 coefficient types (#3691)
  • Fixed LinearAlgebra.hermitian(::AbstractJuMPScalar) (#3693)
  • Fixed multiplying real scalar by Hermitian matrix (#3695)

Other

Version 1.20.0 (February 15, 2024)

Added

Fixed

  • Fixed compat of DimensionalData (#3666)
  • Fixed convert(::Type{NonlinearExpr}, ::Number) (#3672)

Other

  • Added Optim to list of solvers (#3624)
  • Improved linking within documentation (#3669)

Version 1.19.0 (February 1, 2024)

Added

  • Added support for modifying quadratic coefficients (#3658)

Fixed

  • Fixed short circuiting of && and || in macros (#3655)

Other

  • Added SDPLR to list of solvers (#3644)
  • Added new roadmap items (#3645)
  • Fixed vale.sh version (#3650)
  • Improve error messages in macros (#3653)
  • Refactoring of set_normalized_coefficient (#3660) (#3661)
  • Update docs/packages.toml (#3662)

Version 1.18.1 (January 6, 2024)

Fixed

Version 1.18.0 (January 2, 2024)

Added

Fixed

Other

  • Added DisjunctiveProgramming to extension-tests (#3597)
  • Added DisjunctiveProgramming to docs (#3598)
  • Added DocumenterCitations to the docs (#3596), (#3630)
  • Migrate from SnoopPrecompile to PrecompileTools (#3608)
  • Minor documentation updates (#3623), (#3628), (#3635), (#3640), (#3643)

Version 1.17.0 (December 4, 2023)

Added

Fixed

Other

Version 1.16.0 (October 24, 2023)

Added

  • Added := operator for Boolean satisfiability problems (#3530)

Fixed

Other

Version 1.15.1 (September 24, 2023)

Fixed

Other

Version 1.15.0 (September 15, 2023)

This is a large minor release because it adds an entirely new data structure and API path for working with nonlinear programs. The previous nonlinear interface remains unchanged and is documented at Nonlinear Modeling (Legacy). The new interface is treated as a non-breaking feature addition and is documented at Nonlinear Modeling.

Breaking

Although the new nonlinear interface is a feature addition, there are two changes which might be breaking for a very small number of users.

  • The syntax inside JuMP macros is parsed using a different code path, even for linear and quadratic expressions. We made this change to unify how we parse linear, quadratic, and nonlinear expressions. In all cases, the new code returns equivalent expressions, but because of the different order of operations, there are three changes to be aware of when updating:
    • The printed form of the expression may change, for example from x * y to y * x. This can cause tests which test the String representation of a model to fail.
    • Some coefficients may change slightly due to floating point round-off error.
    • Particularly when working with a JuMP extension, you may encounter a MethodError due to a missing or ambiguous method. These errors are due to previously existing bugs that were not triggered by the previous parsing code. If you encounter such an error, please open a GitHub issue.
  • The methods for Base.:^(x::VariableRef, n::Integer) and Base.:^(x::AffExpr, n::Integer) have changed. Previously, these methods supported only n = 0, 1, 2 and they always returned a QuadExpr, even for the case when n = 0 or n = 1. Now:
    • x^0 returns one(T), where T is the value_type of the model (defaults to Float64)
    • x^1 returns x
    • x^2 returns a QuadExpr
    • x^n where !(0 <= n <= 2) returns a NonlinearExpr.
    We made this change to support nonlinear expressions and to align the mathematical definition of the operation with their return type. (Previously, users were surprised that x^1 returned a QuadExpr.) As a consequence of this change, the methods are now not type-stable. This means that the compiler cannot prove that x^2 returns a QuadExpr. If benchmarking shows that this is a performance problem, you can use the type-stable x * x instead of x^2.
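
    A short illustration of the new behavior, assuming the default Float64 value type:

    using JuMP
    model = Model()
    @variable(model, x)
    x^0    # 1.0, that is, one(Float64)
    x^1    # x, the VariableRef itself
    x^2    # a QuadExpr
    x^3    # a NonlinearExpr
    x * x  # type-stable alternative to x^2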

Added

Fixed

Other

Version 1.14.1 (September 2, 2023)

Fixed

  • Fix links in Documentation (#3478)

Version 1.14.0 (August 27, 2023)

Added

Fixed

  • Fixed model_convert for BridgeableConstraint (#3437)
  • Fixed printing models with integer coefficients larger than typemax(Int) (#3447)
  • Fixed support for constant left-hand side functions in a complementarity constraint (#3452)

Other

  • Updated packages used in documentation (#3444) (#3455)
  • Fixed docstring tests (#3445)
  • Fixed printing change for MathOptInterface (#3446)
  • Fixed typos in documentation (#3448) (#3457)
  • Added SCIP to callback documentation (#3449)

Version 1.13.0 (July 27, 2023)

Added

Fixed

Other

  • Added Loraine.jl to the installation table (#3426)
  • Removed Penopt.jl from packages.toml (#3428)
  • Improved problem statement in cannery example of tutorial (#3430)
  • Minor cleanups in Containers.DenseAxisArray implementation (#3429)
  • Changed nested_problems.jl: outer/inner to upper/lower (#3433)
  • Removed second SDP relaxation in OPF tutorial (#3432)

Version 1.12.0 (June 19, 2023)

Added

Fixed

Other

Version 1.11.1 (May 19, 2023)

Fixed

  • Fixed a poor error message when sum(::DenseAxisArray; dims) was called (#3338)
  • Fixed support for dependent sets in the @variable macro (#3344)
  • Fixed a performance bug in constraints with sparse symmetric matrices (#3349)

Other

  • Improved the printing of complex numbers (#3332)
  • When printing, sets which contain constants ending in .0 now print as integers. This follows the behavior of constants in functions (#3341)
  • Added InfiniteOpt to the extensions documentation (#3343)
  • Added more documentation for the exponential cone (#3345) (#3347)
  • Added checklists for developers (#3346) (#3355)
  • Fixed test support upcoming Julia nightly (#3351)
  • Fixed extension-tests.yml action (#3353)
  • Add more solvers to the documentation (#3359) (#3361) (#3362)

Version 1.11.0 (May 3, 2023)

Added

Fixed

  • Fixed tests for MOI v1.14.0 release (#3312)
  • Fixed indexing containers when an axis is Vector{Any} that contains a Vector{Any} element (#3280)
  • Fixed getindex(::AbstractJuMPScalar) which is called for an expression like x[] (#3314)
  • Fixed bug in set_string_names_on_creation with a vector of variables (#3322)
  • Fixed bug in memoize function in nonlinear documentation (#3337)

Other

  • Fixed typos in the documentation (#3317) (#3318) (#3328)
  • Added a test for the order of setting start values (#3315)
  • Added READMEs of solvers and extensions to the docs (#3309) (#3320) (#3327) (#3329) (#3333)
  • Style improvements to src/variables.jl (#3324)
  • Clarify that column generation does not find global optimum (#3325)
  • Add a GitHub actions workflow for testing extensions prior to release (#3331)
  • Document the release process for JuMP (#3334)
  • Fix links to discourse and chatroom (#3335)

Version 1.10.0 (April 3, 2023)

Added

Fixed

  • Fixed [compat] bound for MathOptInterface in Project.toml (#3272)

Other

Version 1.9.0 (March 7, 2023)

Added

Fixed

  • The matrix returned by a variable in HermitianPSDCone is now a LinearAlgebra.Hermitian matrix. This is potentially breaking if you have written code to assume the return is a Matrix. (#3245) (#3246)
  • Fixed missing support for Base.isreal of expressions (#3252)

Other

Version 1.8.2 (February 27, 2023)

Fixed

  • Fixed dot product between complex JuMP expression and number (#3244)

Other

  • Polish simple SDP examples (#3232)

Version 1.8.1 (February 23, 2023)

Fixed

  • Fixed support for init in nonlinear generator expressions (#3226)

Other

  • Use and document import MathOptInterface as MOI (#3222)
  • Removed references in documentation to multiobjective optimization being unsupported (#3223)
  • Added tutorial on multi-objective portfolio optimization (#3227)
  • Refactored some of the conic tutorials (#3229)
  • Fixed typos in the documentation (#3230)
  • Added tutorial on parallelism (#3231)

Version 1.8.0 (February 16, 2023)

Added

  • Added --> syntax support for indicator constraints. The old syntax of => remains supported (#3207)
  • Added <--> syntax for reified constraints. For now, few solvers support reified constraints (#3206)
  • Added fix_discrete_variables. This is most useful for computing the dual of a mixed-integer program (#3208)
  • Added support for vector-valued objectives. For details, see the Multi-objective knapsack tutorial (#3176)

Fixed

  • Fixed a bug in lp_sensitivity_report by switching to an explicit LU factorization of the basis matrix (#3182)
  • Fixed a bug that prevented [; kwarg] arguments in macros (#3220)

Other

Version 1.7.0 (January 25, 2023)

Added

Other

  • Large refactoring of the tests (#3166) (#3167) (#3168) (#3169) (#3170) (#3171)
  • Remove unreachable code due to VERSION checks (#3172)
  • Document how to test JuMP extensions (#3174)
  • Fix method ambiguities in Containers (#3173)
  • Improve error message that is thrown when = is used instead of == in the @constraint macro (#3178)
  • Improve the error message when Bool is used instead of Bin in the @variable macro (#3180)
  • Update versions of the documentation (#3185)
  • Tidy the import of packages and remove unnecessary prefixes (#3186) (#3187)
  • Refactor src/JuMP.jl by moving methods into more relevant files (#3188)
  • Fix docstring of Model not appearing in the documentation (#3198)

Version 1.6.0 (January 1, 2023)

Added

Fixed

  • Fixed promotion of complex expressions (#3150) (#3164)

Other

  • Added Benders tutorial with in-place resolves (#3145)
  • Added more Tips and tricks for linear programs (#3144) (#3163)
  • Clarified documentation that start can depend on the indices of a variable container (#3148)
  • Replace instances of length and size by the recommended eachindex and axes (#3149)
  • Added a warning explaining why the model is dirty when accessing solution results from a modified model (#3156)
  • Clarify documentation that PSD ensures a symmetric matrix (#3159)
  • Maintenance of the JuMP test suite (#3146) (#3158) (#3162)

Version 1.5.0 (December 8, 2022)

Added

Fixed

Other

Version 1.4.0 (October 29, 2022)

Added

Fixed

  • Fixed a bug in copy_to(dest::Model, src::MOI.ModelLike) when src has nonlinear components (#3101)
  • Fixed the printing of (-1.0 + 0.0im) coefficients in complex expressions (#3112)
  • Fixed a parsing bug in nonlinear expressions with generator statements that contain multiple for statements (#3116)

Other

  • Converted the multi-commodity flow tutorial to use an SQLite database (#3098)
  • Fixed a number of typos in the documentation (#3103) (#3107) (#3018)
  • Improved various style aspects of the PDF documentation (#3095) (#3098) (#3102)

Version 1.3.1 (September 28, 2022)

Fixed

  • Fixed a performance issue in relax_integrality (#3087)
  • Fixed the type stability of operators with Complex arguments (#3072)
  • Fixed a bug which added additional +() terms to some nonlinear expressions (#3091)
  • Fixed potential method ambiguities with AffExpr and QuadExpr objects (#3092)

Other

Version 1.3.0 (September 5, 2022)

Added

  • Support slicing in SparseAxisArray (#3031)

Fixed

  • Fixed a bug introduced in v1.2.0 that prevented DenseAxisArrays with Vector keys (#3064)

Other

Version 1.2.1 (August 22, 2022)

Fixed

  • Fixed a bug when parsing two-sided nonlinear constraints (#3045)

Version 1.2.0 (August 16, 2022)

Breaking

This is a large minor release because it significantly refactors the internal code for handling nonlinear programs to use the MathOptInterface.Nonlinear submodule that was introduced in MathOptInterface v1.3.0. As a consequence, the internal data structure in model.nlp_data has been removed, as has the JuMP._Derivatives submodule. Despite the changes, the public API for nonlinear programming has not changed, and any code that uses only the public API and that worked with v1.1.1 will continue to work with v1.2.0.

Added

  • Added all_constraints(model; include_variable_in_set_constraints) which simplifies returning a list of all constraint indices in the model.
  • Added the ability to delete nonlinear constraints via delete(::Model, ::NonlinearConstraintRef).
  • Added the ability to provide an explicit Hessian for a multivariate user-defined function.
  • Added support for querying the primal value of a nonlinear constraint via value(::NonlinearConstraintRef)
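
    For example, a hedged sketch using the legacy nonlinear macros that this release targeted, with Ipopt assumed as the solver:

    using JuMP, Ipopt
    model = Model(Ipopt.Optimizer)
    @variable(model, x >= 1)
    @NLconstraint(model, c, x^3 >= 2)
    @NLobjective(model, Min, x)
    optimize!(model)
    value(c)  # primal value of the nonlinear constraint function at the solution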

Fixed

  • Fixed a bug in Containers.DenseAxisArray so that it now supports indexing with keys that hash to the same value, even if they are different types, for example, Int32 and Int64.
  • Fixed a bug printing the model when the solver does not support MOI.Name.

Other

  • Added a constraint programming formulation to the Sudoku tutorial.
  • Added newly supported solvers Pajarito, Clarabel, and COPT to the installation table.
  • Fixed a variety of other miscellaneous issues in the documentation.

Version 1.1.1 (June 14, 2022)

Other

  • Fixed problem displaying LaTeX in the documentation
  • Minor updates to the style guide
  • Updated to MOI v1.4.0 in the documentation

Version 1.1.0 (May 25, 2022)

Added

  • Added num_constraints(::Model; count_variable_in_set_constraints) to simplify the process of counting the number of constraints in a model
  • Added VariableRef(::ConstraintRef) for querying the variable associated with a bound or integrality constraint.
  • Added set_normalized_coefficients for modifying the variable coefficients of a vector-valued constraint.
  • Added set_string_names_on_creation to disable creating String names for variables and constraints. This can improve performance.
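
    For example:

    using JuMP
    model = Model()
    set_string_names_on_creation(model, false)
    @variable(model, x)
    name(x)  # "", because no String name was stored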

Fixed

  • Fixed a bug passing nothing to the start keyword of @variable

Other

  • New tutorials:
    • Sensitivity analysis of a linear program
    • Serving web apps
  • Minimal ellipse SDP tutorial refactored and improved
  • Docs updated to the latest version of each package
  • Lots of minor fixes and improvements to the documentation

Version 1.0.0 (March 24, 2022)

Read more about this release, along with an acknowledgement of all the contributors in our JuMP 1.0.0 is released blog post.

Breaking

  • The previously deprecated functions (v0.23.0, v0.23.1) have been removed. Deprecation was to improve consistency of function names:
    • num_nl_constraints (see num_nonlinear_constraints)
    • all_nl_constraints (see all_nonlinear_constraints)
    • add_NL_expression (see add_nonlinear_expression)
    • set_NL_objective (see set_nonlinear_objective)
    • add_NL_constraint (see add_nonlinear_constraint)
    • nl_expr_string (see nonlinear_expr_string)
    • nl_constraint_string (see nonlinear_constraint_string)
    • SymMatrixSpace (see SymmetricMatrixSpace)
  • The unintentionally exported variable JuMP.op_hint has been renamed to the unexported JuMP._OP_HINT

Fixed

  • Fixed a bug writing .nl files
  • Fixed a bug broadcasting SparseAxisArrays

Version 0.23.2 (March 14, 2022)

Added

  • Added relative_gap to solution_summary
  • register now throws an informative error if the function is not differentiable using ForwardDiff. In some cases, the check in register will encounter a false negative, and the informative error will be thrown at run-time. This usually happens when the function is non-differentiable in a subset of the domain.

Fixed

  • Fixed a scoping issue when extending the container keyword of containers

Other

  • Docs updated to the latest version of each package

Version 0.23.1 (March 2, 2022)

Deprecated

  • nl_expr_string and nl_constraint_string have been renamed to nonlinear_expr_string and nonlinear_constraint_string. The old methods still exist with deprecation warnings. This change should impact very few users because to call them you must rely on private internals of the nonlinear API. Users are encouraged to use sprint(show, x) instead, where x is the nonlinear expression or constraint of interest.

Added

  • Added support for Base.abs2(x) where x is a variable or affine expression. This is mainly useful for complex-valued constraints.

Fixed

  • Fixed addition of complex and real affine expressions
  • Fixed arithmetic for Complex-valued quadratic expressions
  • Fixed variable bounds passed as Rational{Int}(Inf)
  • Fixed printing of the coefficient (0 + 1im)
  • Fixed a bug when solution_summary is called prior to optimize!

Version 0.23.0 (February 25, 2022)

JuMP v0.23.0 is a breaking release. It is also a release-candidate for JuMP v1.0.0. That is, if no issues are found with the v0.23.0 release, then it will be re-tagged as v1.0.0.

Breaking

  • Julia 1.6 is now the minimum supported version
  • MathOptInterface has been updated to v1.0.0
  • All previously deprecated functionality has been removed
  • PrintMode, REPLMode and IJuliaMode have been removed in favor of the MIME types MIME"text/plain" and MIME"text/latex". Replace instances of ::Type{REPLMode} with ::MIME"text/plain", REPLMode with MIME("text/plain"), ::Type{IJuliaMode} with ::MIME"text/latex", and IJuliaMode with MIME("text/latex").
  • Functions containing the nl_ acronym have been renamed to the more explicit nonlinear_. For example, num_nl_constraints is now num_nonlinear_constraints and set_NL_objective is now set_nonlinear_objective. Calls to the old functions throw an error explaining the new name.
  • SymMatrixSpace has been renamed to SymmetricMatrixSpace

Added

  • Added nonlinear_dual_start_value and set_nonlinear_dual_start_value
  • Added preliminary support for Complex coefficient types

Fixed

  • Fixed a bug in solution_summary

Other

  • MILP examples have been migrated from GLPK to HiGHS
  • Fixed various typos
  • Improved section on setting constraint start values

Troubleshooting problems when updating

If you experience problems when updating, you are likely using previously deprecated functionality. (By default, Julia does not warn when you use deprecated features.)

To find the deprecated features you are using, start Julia with --depwarn=yes:

$ julia --depwarn=yes

Then install JuMP v0.22.3:

julia> using Pkg
-julia> pkg"add JuMP@0.22.3"

And then run your code. Apply any suggestions, or search the release notes below for advice on updating a specific deprecated feature.

Version 0.22.3 (February 10, 2022)

Fixed

  • Fixed a reproducibility issue in the TSP tutorial
  • Fixed a reproducibility issue in the max_cut_sdp tutorial
  • Fixed a bug broadcasting an empty SparseAxisArray

Other

  • Added a warning and improved documentation for the modify-then-query case
  • Fixed a typo in the docstring of RotatedSecondOrderCone
  • Added Aqua.jl as a check for code health
  • Added introductions to each section of the tutorials
  • Improved the column generation and Benders decomposition tutorials
  • Updated documentation to MOI v0.10.8
  • Updated JuliaFormatter to v0.22.2

Version 0.22.2 (January 10, 2022)

Added

  • The function all_nl_constraints now returns all nonlinear constraints in a model
  • start_value and set_start_value can now be used to get and set the primal start for constraint references
  • Plural macros now return a tuple containing the elements that were defined instead of nothing
  • Anonymous variables are now printed as _[i] where i is the index of the variable instead of noname. Calling name(x) still returns "" so this is non-breaking.

Fixed

  • Fixed handling of min and max in nonlinear expressions
  • CartesianIndex is no longer allowed as a key for DenseAxisArrays.

Other

  • Improved the performance of GenericAffExpr
  • Added a tutorial on the Travelling Salesperson Problem
  • Added a tutorial on querying the Hessian of a nonlinear program
  • Added documentation on using custom solver binaries.

Version 0.22.1 (November 29, 2021)

Added

  • Export OptimizationSense enum, with instances: MIN_SENSE, MAX_SENSE, and FEASIBILITY_SENSE
  • Add Base.isempty(::Model) to match Base.empty(::Model)

Fixed

  • Fix bug in container with tuples as indices
  • Fix bug in set_time_limit_sec

Other

  • Add tutorial "Design patterns for larger models"
  • Remove release notes section from PDF
  • General edits of the documentation and error messages

Version 0.22.0 (November 10, 2021)

JuMP v0.22 is a breaking release

Breaking

JuMP 0.22 contains a number of breaking changes. However, these should be invisible for the majority of users. You will mostly encounter these breaking changes if you: wrote a JuMP extension, accessed backend(model), or called @SDconstraint.

The breaking changes are as follows:

  • MathOptInterface has been updated to v0.10.4. For users who have interacted with the MOI backend, this contains a large number of breaking changes. Read the MathOptInterface release notes for more details.
  • The bridge_constraints keyword argument to Model and set_optimizer has been renamed add_bridges to reflect that more things were bridged than just constraints.
  • The backend(model) field now contains a concrete instance of a MOI.Utilities.CachingOptimizer instead of one with an abstractly typed optimizer field. In most cases, this will lead to improved performance. However, calling set_optimizer after backend invalidates the old backend. For example:
    model = Model()
    b = backend(model)
    set_optimizer(model, GLPK.Optimizer)
    @variable(model, x)
    # b is not updated with `x`! Get a new b by calling `backend` again.
    new_b = backend(model)

    Release notes

    The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

    Version 1.23.5 (November 19, 2024)

    Fixed

    Other

    Version 1.23.4 (November 8, 2024)

    Fixed

    • Fixed UnsupportedNonlinearOperator error for the single argument LinearAlgebra.norm (#3864)
    • Fixed printing MOI.Interval with MIME"text/latex" (#3866)

    Other

    Version 1.23.3 (October 21, 2024)

    Fixed

    Other

    Version 1.23.2 (September 13, 2024)

    Fixed

    • Fixed an illegal simplification in MA.operate!! for NonlinearExpr (#3826)

    Other

    Version 1.23.1 (August 30, 2024)

    Fixed

    • Fixed a bug with indicator constraints and the in set syntax (#3813)

    Other

    Version 1.23.0 (August 13, 2024)

    Added

    • Added set inequality syntax for matrices (#3766)
    • Improved matrix inequality support (#3778) (#3805)

    Fixed

    • Fixed a method for calling value on a ::Number (#3776)
    • Fixed querying dual of Symmetric and Hermitian equality constraints (#3797)
    • Fixed read_from_file for coefficient types other than Float64 (#3801)

    Other

    • Documentation improvements
      • Fixed missing character in installation instructions (#3777)
      • Added a section on querying the Jacobian (#3779)
      • Clarify that SCIP does not support lazy constraints (#3784)
      • Fixed typo in knapsack.jl (#3792)
      • Added a warning to docs about tolerances in Bin and Int variables (#3794)
      • Clarify where to type installation commands (#3795)
    • Improve error message for common incorrect syntax in constraint macro (#3781)
    • Changed show(::IO, ::GenericModel) to a more informative tree structure (#3803)

    Version 1.22.2 (June 17, 2024)

    Fixed

    • Fixed printing to omit terms when printing a large array of expressions (#3759)
    • Fixed bug in printing when show is called on an invalid variable or constraint (#3763)

    Other

    • Improved error message for unsupported kwargs in variable macro (#3751)
    • Improved error message for unsupported container syntax like x[A][B] (#3756)
    • Docstring improvements (#3758), (#3760), (#3761), (#3767)
    • Added warning to documentation about Y <= X, Set() syntax (#3769)
    • Work-around change on nightly (#3753), (#3754)
    • Improved printing of symmetric matrices when used in constraints (#3768)
    • Fixed a test for upcoming printing change in MOI (#3772)
    • Updated should_i_use.md (#3773)

    Version 1.22.1 (May 17, 2024)

    Fixed

    • Fixed bug including non-.jl files in src/macros.jl (#3747)

    Other

    • Added DSDP to the list of supported solvers (#3745)
    • Updated YALMIP migration guide (#3748)

    Version 1.22.0 (May 12, 2024)

    Added

    • Added Base.complex(r, i) where r and i may be real-valued variables or affine or quadratic expressions (#3734)
    • Added @force_nonlinear for controlling when affine and quadratic expressions are instead parsed as nonlinear expressions. This can be useful for advanced users in a limited set of circumstances. (#3732)
    • Added support for returning the variable coefficients of a vector-valued constraint via normalized_coefficient. In addition, set_normalized_coefficients has been softly deprecated (no warning is thrown and old code will still work for all future 1.X releases of JuMP) in favor of set_normalized_coefficient. This change was made to unify how we get and set variable coefficients. (#3743)
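
    A minimal sketch of the new Base.complex constructor described in the list above; no solver is needed, and the variable names are illustrative:

      using JuMP

      model = Model()
      @variable(model, x)
      @variable(model, y)
      z = complex(x, y)  # the affine expression x + y*im, with ComplexF64 coefficients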

    Fixed

    • Fixed missing promote_operation method that resulted in slow code (#3730)
    • Improved performance of getindex for Containers.DenseAxisArray (#3731)
    • Fixed the error message when the legacy nonlinear API is mixed with the new nonlinear API. In particular, we now uniformly throw an error message when unexpected objects occur in nonlinear expressions. (#3741)

    Other

    Version 1.21.1 (April 11, 2024)

    Fixed

    • Fixed behavior of complex-value related functions like real, imag, conj and abs2 when called on GenericNonlinearExpr. This fixes a method error when calling x' where x is an array of nonlinear expressions. As a related consequence, we now always error when creating nonlinear expressions with complex components. Previously, only some constructors were checked for complex expressions. (#3724)

    Other

    Version 1.21.0 (March 31, 2024)

    Added

    • Added support for matrix inequality constraints with the HermitianPSDCone (#3705)
    • Added batched modification methods for set_normalized_rhs, set_objective_coefficient and set_normalized_coefficient. Using these methods can be more efficient for some solvers (#3716)
    • Added the private constant _CONSTRAINT_LIMIT_FOR_PRINTING, which controls how many constraints are printed to the screen during print(model). The main purpose of this is to prevent large quantities of text being printed when print(model) is accidentally called on a large model. (#3686)
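
    A minimal sketch of the batched modification methods listed above, assuming the batched signatures accept a vector of constraints (or variables) together with a vector of new values:

      using JuMP

      model = Model()
      @variable(model, x)
      @variable(model, y)
      @objective(model, Min, x + y)
      @constraint(model, c1, 2x + y <= 1)
      @constraint(model, c2, x - y <= 2)
      # A single call modifies several constraints at once, which some solvers
      # handle more efficiently than an equivalent loop of scalar modifications.
      set_normalized_rhs([c1, c2], [3.0, 4.0])
      set_objective_coefficient(model, [x, y], [5.0, 6.0])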

    Fixed

    • Changed Containers.SparseAxisArray to use an OrderedDict as the backing data structure. Iterating over the elements in a SparseAxisArray now iterates in the order that the elements were created. Previously, the order was undefined behavior. (#3681)
    • Fixed complex variables for non-Float64 coefficient types (#3691)
    • Fixed LinearAlgebra.hermitian(::AbstractJuMPScalar) (#3693)
    • Fixed multiplying real scalar by Hermitian matrix (#3695)

    Other

    Version 1.20.0 (February 15, 2024)

    Added

    Fixed

    • Fixed compat of DimensionalData (#3666)
    • Fixed convert(::Type{NonlinearExpr}, ::Number) (#3672)

    Other

    • Added Optim to list of solvers (#3624)
    • Improved linking within documentation (#3669)

    Version 1.19.0 (February 1, 2024)

    Added

    • Added support for modifying quadratic coefficients (#3658)

    Fixed

    • Fixed short circuiting of && and || in macros (#3655)

    Other

    • Added SDPLR to list of solvers (#3644)
    • Added new roadmap items (#3645)
    • Fixed vale.sh version (#3650)
    • Improve error messages in macros (#3653)
    • Refactoring of set_normalized_coefficient (#3660) (#3661)
    • Update docs/packages.toml (#3662)

    Version 1.18.1 (January 6, 2024)

    Fixed

    Version 1.18.0 (January 2, 2024)

    Added

    Fixed

    Other

    • Added DisjunctiveProgramming to extension-tests (#3597)
    • Added DisjunctiveProgramming to docs (#3598)
    • Added DocumenterCitations to the docs (#3596), (#3630)
    • Migrate from SnoopPrecompile to PrecompileTools (#3608)
    • Minor documentation updates (#3623), (#3628), (#3635), (#3640), (#3643)

    Version 1.17.0 (December 4, 2023)

    Added

    Fixed

    Other

    Version 1.16.0 (October 24, 2023)

    Added

    • Added := operator for Boolean satisfiability problems (#3530)

    Fixed

    Other

    Version 1.15.1 (September 24, 2023)

    Fixed

    Other

    Version 1.15.0 (September 15, 2023)

    This is a large minor release because it adds an entirely new data structure and API path for working with nonlinear programs. The previous nonlinear interface remains unchanged and is documented at Nonlinear Modeling (Legacy). The new interface is treated as a non-breaking feature addition and is documented at Nonlinear Modeling.

    Breaking

    Although the new nonlinear interface is a feature addition, there are two changes which might be breaking for a very small number of users.

    • The syntax inside JuMP macros is parsed using a different code path, even for linear and quadratic expressions. We made this change to unify how we parse linear, quadratic, and nonlinear expressions. In all cases, the new code returns equivalent expressions, but because of the different order of operations, there are three changes to be aware of when updating:
      • The printed form of the expression may change, for example from x * y to y * x. This can cause tests which test the String representation of a model to fail.
      • Some coefficients may change slightly due to floating point round-off error.
      • Particularly when working with a JuMP extension, you may encounter a MethodError due to a missing or ambiguous method. These errors are due to previously existing bugs that were not triggered by the previous parsing code. If you encounter such an error, please open a GitHub issue.
    • The methods for Base.:^(x::VariableRef, n::Integer) and Base.:^(x::AffExpr, n::Integer) have changed. Previously, these methods supported only n = 0, 1, 2 and they always returned a QuadExpr, even for the case when n = 0 or n = 1. Now:
      • x^0 returns one(T), where T is the value_type of the model (defaults to Float64)
      • x^1 returns x
      • x^2 returns a QuadExpr
      • x^n where !(0 <= n <= 2) returns a NonlinearExpr.
      We made this change to support nonlinear expressions and to align the mathematical definition of the operation with their return type. (Previously, users were surprised that x^1 returned a QuadExpr.) As a consequence of this change, the methods are now not type-stable. This means that the compiler cannot prove that x^2 returns a QuadExpr. If benchmarking shows that this is a performance problem, you can use the type-stable x * x instead of x^2.
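
      A quick illustration of the new behavior; the return types in the comments assume the default value_type of Float64:

        using JuMP

        model = Model()
        @variable(model, x)
        x^0    # 1.0, that is, one(Float64)
        x^1    # the variable x itself
        x^2    # a QuadExpr
        x^3    # a NonlinearExpr
        x * x  # type-stable alternative to x^2 if benchmarking shows ^ is a bottleneck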

    Added

    Fixed

    Other

    Version 1.14.1 (September 2, 2023)

    Fixed

    • Fix links in Documentation (#3478)

    Version 1.14.0 (August 27, 2023)

    Added

    Fixed

    • Fixed model_convert for BridgeableConstraint (#3437)
    • Fixed printing models with integer coefficients larger than typemax(Int) (#3447)
    • Fixed support for constant left-hand side functions in a complementarity constraint (#3452)

    Other

    • Updated packages used in documentation (#3444) (#3455)
    • Fixed docstring tests (#3445)
    • Fixed printing change for MathOptInterface (#3446)
    • Fixed typos in documentation (#3448) (#3457)
    • Added SCIP to callback documentation (#3449)

    Version 1.13.0 (July 27, 2023)

    Added

    Fixed

    Other

    • Added Loraine.jl to the installation table (#3426)
    • Removed Penopt.jl from packages.toml (#3428)
    • Improved problem statement in cannery example of tutorial (#3430)
    • Minor cleanups in Containers.DenseAxisArray implementation (#3429)
    • Changed nested_problems.jl: outer/inner to upper/lower (#3433)
    • Removed second SDP relaxation in OPF tutorial (#3432)

    Version 1.12.0 (June 19, 2023)

    Added

    Fixed

    Other

    Version 1.11.1 (May 19, 2023)

    Fixed

    • Fixed a poor error message when sum(::DenseAxisArray; dims) was called (#3338)
    • Fixed support for dependent sets in the @variable macro (#3344)
    • Fixed a performance bug in constraints with sparse symmetric matrices (#3349)

    Other

    • Improved the printing of complex numbers (#3332)
    • When printing, sets which contain constants ending in .0 now print as integers. This follows the behavior of constants in functions (#3341)
    • Added InfiniteOpt to the extensions documentation (#3343)
    • Added more documentation for the exponential cone (#3345) (#3347)
    • Added checklists for developers (#3346) (#3355)
    • Fixed test support for upcoming Julia nightly (#3351)
    • Fixed extension-tests.yml action (#3353)
    • Add more solvers to the documentation (#3359) (#3361) (#3362)

    Version 1.11.0 (May 3, 2023)

    Added

    Fixed

    • Fixed tests for MOI v1.14.0 release (#3312)
    • Fixed indexing containers when an axis is Vector{Any} that contains a Vector{Any} element (#3280)
    • Fixed getindex(::AbstractJuMPScalar) which is called for an expression like x[] (#3314)
    • Fixed bug in set_string_names_on_creation with a vector of variables (#3322)
    • Fixed bug in memoize function in nonlinear documentation (#3337)

    Other

    • Fixed typos in the documentation (#3317) (#3318) (#3328)
    • Added a test for the order of setting start values (#3315)
    • Added READMEs of solvers and extensions to the docs (#3309) (#3320) (#3327) (#3329) (#3333)
    • Style improvements to src/variables.jl (#3324)
    • Clarify that column generation does not find global optimum (#3325)
    • Add a GitHub actions workflow for testing extensions prior to release (#3331)
    • Document the release process for JuMP (#3334)
    • Fix links to discourse and chatroom (#3335)

    Version 1.10.0 (April 3, 2023)

    Added

    Fixed

    • Fixed [compat] bound for MathOptInterface in Project.toml (#3272)

    Other

    Version 1.9.0 (March 7, 2023)

    Added

    Fixed

    • The matrix returned by a variable in HermitianPSDCone is now a LinearAlgebra.Hermitian matrix. This is potentially breaking if you have written code to assume the return is a Matrix. (#3245) (#3246)
    • Fixed missing support for Base.isreal of expressions (#3252)

    Other

    Version 1.8.2 (February 27, 2023)

    Fixed

    • Fixed dot product between complex JuMP expression and number (#3244)

    Other

    • Polish simple SDP examples (#3232)

    Version 1.8.1 (February 23, 2023)

    Fixed

    • Fixed support for init in nonlinear generator expressions (#3226)

    Other

    • Use and document import MathOptInterface as MOI (#3222)
    • Removed references in documentation to multiobjective optimization being unsupported (#3223)
    • Added tutorial on multi-objective portfolio optimization (#3227)
    • Refactored some of the conic tutorials (#3229)
    • Fixed typos in the documentation (#3230)
    • Added tutorial on parallelism (#3231)

    Version 1.8.0 (February 16, 2023)

    Added

    • Added --> syntax support for indicator constraints. The old syntax of => remains supported (#3207)
    • Added <--> syntax for reified constraints. For now, few solvers support reified constraints (#3206)
    • Added fix_discrete_variables. This is most useful for computing the dual of a mixed-integer program (#3208)
    • Added support for vector-valued objectives. For details, see the Multi-objective knapsack tutorial (#3176)
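
    A minimal sketch of the indicator and reified syntaxes added above; z is a binary variable, and actually solving the reified constraint requires a solver that supports it:

      using JuMP

      model = Model()
      @variable(model, z, Bin)
      @variable(model, x)
      # Indicator: if z == 1 then x <= 10 must hold.
      @constraint(model, z --> {x <= 10})
      # Reified: z == 1 if and only if x <= 10.
      @constraint(model, z <--> {x <= 10})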

    Fixed

    • Fixed a bug in lp_sensitivity_report by switching to an explicit LU factorization of the basis matrix (#3182)
    • Fixed a bug that prevented [; kwarg] arguments in macros (#3220)

    Other

    Version 1.7.0 (January 25, 2023)

    Added

    Other

    • Large refactoring of the tests (#3166) (#3167) (#3168) (#3169) (#3170) (#3171)
    • Remove unreachable code due to VERSION checks (#3172)
    • Document how to test JuMP extensions (#3174)
    • Fix method ambiguities in Containers (#3173)
    • Improve error message that is thrown when = is used instead of == in the @constraint macro (#3178)
    • Improve the error message when Bool is used instead of Bin in the @variable macro (#3180)
    • Update versions of the documentation (#3185)
    • Tidy the import of packages and remove unnecessary prefixes (#3186) (#3187)
    • Refactor src/JuMP.jl by moving methods into more relevant files (#3188)
    • Fix docstring of Model not appearing in the documentation (#3198)

    Version 1.6.0 (January 1, 2023)

    Added

    Fixed

    • Fixed promotion of complex expressions (#3150) (#3164)

    Other

    • Added Benders tutorial with in-place resolves (#3145)
    • Added more Tips and tricks for linear programs (#3144) (#3163)
    • Clarified documentation that start can depend on the indices of a variable container (#3148)
    • Replace instances of length and size by the recommended eachindex and axes (#3149)
    • Added a warning explaining why the model is dirty when accessing solution results from a modified model (#3156)
    • Clarify documentation that PSD ensures a symmetric matrix (#3159)
    • Maintenance of the JuMP test suite (#3146) (#3158) (#3162)

    Version 1.5.0 (December 8, 2022)

    Added

    Fixed

    Other

    Version 1.4.0 (October 29, 2022)

    Added

    Fixed

    • Fixed a bug in copy_to(dest::Model, src::MOI.ModelLike) when src has nonlinear components (#3101)
    • Fixed the printing of (-1.0 + 0.0im) coefficients in complex expressions (#3112)
    • Fixed a parsing bug in nonlinear expressions with generator statements that contain multiple for statements (#3116)

    Other

    • Converted the multi-commodity flow tutorial to use an SQLite database (#3098)
    • Fixed a number of typos in the documentation (#3103) (#3107) (#3018)
    • Improved various style aspects of the PDF documentation (#3095) (#3098) (#3102)

    Version 1.3.1 (September 28, 2022)

    Fixed

    • Fixed a performance issue in relax_integrality (#3087)
    • Fixed the type stability of operators with Complex arguments (#3072)
    • Fixed a bug which added additional +() terms to some nonlinear expressions (#3091)
    • Fixed potential method ambiguities with AffExpr and QuadExpr objects (#3092)

    Other

    Version 1.3.0 (September 5, 2022)

    Added

    • Support slicing in SparseAxisArray (#3031)

    Fixed

    • Fixed a bug introduced in v1.2.0 that prevented DenseAxisArrays with Vector keys (#3064)

    Other

    Version 1.2.1 (August 22, 2022)

    Fixed

    • Fixed a bug when parsing two-sided nonlinear constraints (#3045)

    Version 1.2.0 (August 16, 2022)

    Breaking

    This is a large minor release because it significantly refactors the internal code for handling nonlinear programs to use the MathOptInterface.Nonlinear submodule that was introduced in MathOptInterface v1.3.0. As a consequence, the internal data structure in model.nlp_data has been removed, as has the JuMP._Derivatives submodule. Despite the changes, the public API for nonlinear programming has not changed, and any code that uses only the public API and that worked with v1.1.1 will continue to work with v1.2.0.

    Added

    • Added all_constraints(model; include_variable_in_set_constraints) which simplifies returning a list of all constraint indices in the model.
    • Added the ability to delete nonlinear constraints via delete(::Model, ::NonlinearConstraintRef).
    • Added the ability to provide an explicit Hessian for a multivariate user-defined function.
    • Added support for querying the primal value of a nonlinear constraint via value(::NonlinearConstraintRef)
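
    A minimal sketch of all_constraints with the new keyword; the counts in the comments assume exactly the model shown:

      using JuMP

      model = Model()
      @variable(model, x >= 0)
      @constraint(model, 2x <= 1)
      # Include variable-in-set constraints such as the bound x >= 0 ...
      length(all_constraints(model; include_variable_in_set_constraints = true))   # 2
      # ... or list only the remaining constraints.
      length(all_constraints(model; include_variable_in_set_constraints = false))  # 1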

    Fixed

    • Fixed a bug in Containers.DenseAxisArray so that it now supports indexing with keys that hash to the same value, even if they are different types, for example, Int32 and Int64.
    • Fixed a bug printing the model when the solver does not support MOI.Name.

    Other

    • Added a constraint programming formulation to the Sudoku tutorial.
    • Added newly supported solvers Pajarito, Clarabel, and COPT to the installation table.
    • Fixed a variety of other miscellaneous issues in the documentation.

    Version 1.1.1 (June 14, 2022)

    Other

    • Fixed problem displaying LaTeX in the documentation
    • Minor updates to the style guide
    • Updated to MOI v1.4.0 in the documentation

    Version 1.1.0 (May 25, 2022)

    Added

    • Added num_constraints(::Model; count_variable_in_set_constraints) to simplify the process of counting the number of constraints in a model
    • Added VariableRef(::ConstraintRef) for querying the variable associated with a bound or integrality constraint.
    • Added set_normalized_coefficients for modifying the variable coefficients of a vector-valued constraint.
    • Added set_string_names_on_creation to disable creating String names for variables and constraints. This can improve performance.
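
    A minimal sketch of two of the additions above; the counts in the comments assume exactly the model shown:

      using JuMP

      model = Model()
      @variable(model, x >= 0, Int)
      @constraint(model, 2x <= 1)
      num_constraints(model; count_variable_in_set_constraints = true)   # 3
      num_constraints(model; count_variable_in_set_constraints = false)  # 1
      # Recover the variable behind a bound constraint:
      VariableRef(LowerBoundRef(x)) === x  # true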

    Fixed

    • Fixed a bug passing nothing to the start keyword of @variable

    Other

    • New tutorials:
      • Sensitivity analysis of a linear program
      • Serving web apps
    • Minimal ellipse SDP tutorial refactored and improved
    • Docs updated to the latest version of each package
    • Lots of minor fixes and improvements to the documentation

    Version 1.0.0 (March 24, 2022)

    Read more about this release, along with an acknowledgement of all the contributors in our JuMP 1.0.0 is released blog post.

    Breaking

    • The functions previously deprecated in v0.23.0 and v0.23.1 have been removed. They were deprecated to improve the consistency of function names:
      • num_nl_constraints (see num_nonlinear_constraints)
      • all_nl_constraints (see all_nonlinear_constraints)
      • add_NL_expression (see add_nonlinear_expression)
      • set_NL_objective (see set_nonlinear_objective)
      • add_NL_constraint (see add_nonlinear_constraint)
      • nl_expr_string (see nonlinear_expr_string)
      • nl_constraint_string (see nonlinear_constraint_string)
      • SymMatrixSpace (see SymmetricMatrixSpace)
    • The unintentionally exported variable JuMP.op_hint has been renamed to the unexported JuMP._OP_HINT

    Fixed

    • Fixed a bug writing .nl files
    • Fixed a bug broadcasting SparseAxisArrays

    Version 0.23.2 (March 14, 2022)

    Added

    • Added relative_gap to solution_summary
    • register now throws an informative error if the function is not differentiable using ForwardDiff. In some cases, the check in register will encounter a false negative, and the informative error will be thrown at run-time. This usually happens when the function is non-differentiable in a subset of the domain.
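
    A minimal sketch of the registration call that triggers the new differentiability check; the function f is illustrative:

      using JuMP

      model = Model()
      @variable(model, x)
      f(a) = a^2 + sin(a)
      # register checks that f can be differentiated with ForwardDiff; a
      # non-differentiable function now produces an informative error
      # (in some cases only at run time).
      register(model, :f, 1, f; autodiff = true)
      @NLobjective(model, Min, f(x))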

    Fixed

    • Fixed a scoping issue when extending the container keyword of containers

    Other

    • Docs updated to the latest version of each package

    Version 0.23.1 (March 2, 2022)

    Deprecated

    • nl_expr_string and nl_constraint_string have been renamed to nonlinear_expr_string and nonlinear_constraint_string. The old methods still exist with deprecation warnings. This change should impact very few users because to call them you must rely on private internals of the nonlinear API. Users are encouraged to use sprint(show, x) instead, where x is the nonlinear expression or constraint of interest.

    Added

    • Added support for Base.abs2(x) where x is a variable or affine expression. This is mainly useful for complex-valued constraints.
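
    A minimal sketch of the new abs2 support, combined with the preliminary complex coefficients from v0.23.0; the expansions in the comments are the expected algebraic results:

      using JuMP

      model = Model()
      @variable(model, x)
      @variable(model, y)
      abs2(x + 2y)      # the quadratic expression (x + 2y)^2
      abs2(x + im * y)  # x^2 + y^2, useful inside complex-valued constraints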

    Fixed

    • Fixed addition of complex and real affine expressions
    • Fixed arithmetic for Complex-valued quadratic expressions
    • Fixed variable bounds passed as Rational{Int}(Inf)
    • Fixed printing of the coefficient (0 + 1im)
    • Fixed a bug when solution_summary is called prior to optimize!

    Version 0.23.0 (February 25, 2022)

    JuMP v0.23.0 is a breaking release. It is also a release-candidate for JuMP v1.0.0. That is, if no issues are found with the v0.23.0 release, then it will be re-tagged as v1.0.0.

    Breaking

    • Julia 1.6 is now the minimum supported version
    • MathOptInterface has been updated to v1.0.0
    • All previously deprecated functionality has been removed
    • PrintMode, REPLMode and IJuliaMode have been removed in favor of the MIME types MIME"text/plain" and MIME"text/latex". Replace instances of ::Type{REPLMode} with ::MIME"text/plain", REPLMode with MIME("text/plain"), ::Type{IJuliaMode} with ::MIME"text/latex", and IJuliaMode with MIME("text/latex").
    • Functions containing the nl_ acronym have been renamed to the more explicit nonlinear_. For example, num_nl_constraints is now num_nonlinear_constraints and set_NL_objective is now set_nonlinear_objective. Calls to the old functions throw an error explaining the new name.
    • SymMatrixSpace has been renamed to SymmetricMatrixSpace
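
    A minimal sketch of the MIME-based replacement for the removed printing modes, assuming show with these MIME types is implemented for the object being printed (as it is for models, expressions, and constraints):

      using JuMP

      model = Model()
      @variable(model, x)
      @constraint(model, c, 2x <= 1)
      # Previously IJuliaMode; now pass the MIME type instead.
      show(stdout, MIME("text/latex"), c)
      # Previously REPLMode; the plain-text form as a String.
      sprint(show, MIME("text/plain"), c)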

    Added

    • Added nonlinear_dual_start_value and set_nonlinear_dual_start_value
    • Added preliminary support for Complex coefficient types

    Fixed

    • Fixed a bug in solution_summary

    Other

    • MILP examples have been migrated from GLPK to HiGHS
    • Fixed various typos
    • Improved section on setting constraint start values

    Troubleshooting problems when updating

    If you experience problems when updating, you are likely using previously deprecated functionality. (By default, Julia does not warn when you use deprecated features.)

    To find the deprecated features you are using, start Julia with --depwarn=yes:

    $ julia --depwarn=yes

    Then install JuMP v0.22.3:

    julia> using Pkg
    +julia> pkg"add JuMP@0.22.3"

    And then run your code. Apply any suggestions, or search the release notes below for advice on updating a specific deprecated feature.

    Version 0.22.3 (February 10, 2022)

    Fixed

    • Fixed a reproducibility issue in the TSP tutorial
    • Fixed a reproducibility issue in the max_cut_sdp tutorial
    • Fixed a bug broadcasting an empty SparseAxisArray

    Other

    • Added a warning and improved documentation for the modify-then-query case
    • Fixed a typo in the docstring of RotatedSecondOrderCone
    • Added Aqua.jl as a check for code health
    • Added introductions to each section of the tutorials
    • Improved the column generation and Benders decomposition tutorials
    • Updated documentation to MOI v0.10.8
    • Updated JuliaFormatter to v0.22.2

    Version 0.22.2 (January 10, 2022)

    Added

    • The function all_nl_constraints now returns all nonlinear constraints in a model
    • start_value and set_start_value can now be used to get and set the primal start for constraint references
    • Plural macros now return a tuple containing the elements that were defined instead of nothing
    • Anonymous variables are now printed as _[i] where i is the index of the variable instead of noname. Calling name(x) still returns "" so this is non-breaking.
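
    A minimal sketch of the constraint primal starts mentioned above; the constraint c is illustrative:

      using JuMP

      model = Model()
      @variable(model, x)
      @constraint(model, c, 2x <= 1)
      set_start_value(c, 0.5)  # primal start for the constraint
      start_value(c)           # 0.5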

    Fixed

    • Fixed handling of min and max in nonlinear expressions
    • CartesianIndex is no longer allowed as a key for DenseAxisArrays.

    Other

    • Improved the performance of GenericAffExpr
    • Added a tutorial on the Travelling Salesperson Problem
    • Added a tutorial on querying the Hessian of a nonlinear program
    • Added documentation on using custom solver binaries.

    Version 0.22.1 (November 29, 2021)

    Added

    • Export OptimizationSense enum, with instances: MIN_SENSE, MAX_SENSE, and FEASIBILITY_SENSE
    • Add Base.isempty(::Model) to match Base.empty(::Model)

    Fixed

    • Fix bug in container with tuples as indices
    • Fix bug in set_time_limit_sec

    Other

    • Add tutorial "Design patterns for larger models"
    • Remove release notes section from PDF
    • General edits of the documentation and error messages

    Version 0.22.0 (November 10, 2021)

    JuMP v0.22 is a breaking release

    Breaking

    JuMP 0.22 contains a number of breaking changes. However, these should be invisible for the majority of users. You will mostly encounter these breaking changes if you: wrote a JuMP extension, accessed backend(model), or called @SDconstraint.

    The breaking changes are as follows:

    • MathOptInterface has been updated to v0.10.4. For users who have interacted with the MOI backend, this contains a large number of breaking changes. Read the MathOptInterface release notes for more details.
    • The bridge_constraints keyword argument to Model and set_optimizer has been renamed add_bridges to reflect that more things were bridged than just constraints.
    • The backend(model) field now contains a concrete instance of a MOI.Utilities.CachingOptimizer instead of one with an abstractly typed optimizer field. In most cases, this will lead to improved performance. However, calling set_optimizer after backend invalidates the old backend. For example:
      model = Model()
      b = backend(model)
      set_optimizer(model, GLPK.Optimizer)
      @variable(model, x)
      # b is not updated with `x`! Get a new b by calling `backend` again.
      new_b = backend(model)
    • All usages of @SDconstraint are deprecated. The new syntax is @constraint(model, X >= Y, PSDCone()).
    • Creating a DenseAxisArray with a Number as an axis will now display a warning. This catches a common error in which users write @variable(model, x[length(S)]) instead of @variable(model, x[1:length(S)]).
    • The caching_mode argument to Model, for example, Model(caching_mode = MOIU.MANUAL), has been removed. For more control over the optimizer, use direct_model instead.
    • The previously deprecated lp_objective_perturbation_range and lp_rhs_perturbation_range functions have been removed. Use lp_sensitivity_report instead.
    • The .m fields of NonlinearExpression and NonlinearParameter have been renamed to .model.
    • Infinite variable bounds are now ignored. Thus, @variable(model, x <= Inf) will show has_upper_bound(x) == false. Previously, these bounds were passed through to the solvers which caused numerical issues for solvers expecting finite bounds.
    • The variable_type and constraint_type functions were removed. This should only affect users who previously wrote JuMP extensions. The functions can be deleted without consequence.
    • The internal functions moi_mode, moi_bridge_constraints, moi_add_constraint, and moi_add_to_function_constant are no longer exported.
    • The un-used method Containers.generate_container has been deleted.
    • The Containers API has been refactored, and _build_ref_sets is now public as Containers.build_ref_sets.
    • The parse_constraint_ methods for extending @constraint at parse time have been refactored in a breaking way. Consult the Extensions documentation for more details and examples.

    Added

    • The TerminationStatusCode and ResultStatusCode enums are now exported by JuMP. Prefer termination_status(model) == OPTIMAL instead of == MOI.OPTIMAL, although the MOI.-prefixed form still works.
    • Copy an x::DenseAxisArray to an Array by calling Array(x).
    • NonlinearExpression is now a subtype of AbstractJuMPScalar
    • Constraints such as @constraint(model, x + 1 in MOI.Integer()) are now supported.
    • primal_feasibility_report now accepts a function as the first argument.
    • The scalar variable syntax @variable(model, x[1:2] in MOI.Integer()) creates two variables, both of which are constrained to be in the set MOI.Integer.
    • Conic constraints can now be specified as inequalities under a different partial ordering. So @constraint(model, x - y in MOI.Nonnegatives()) can now be written as @constraint(model, x >= y, MOI.Nonnegatives()).
    • Names are now set for vectorized constraints.
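
    A minimal sketch of two of the additions listed above, the Array conversion of a DenseAxisArray and a function-in-set constraint:

      using JuMP

      model = Model()
      @variable(model, x[2:3])  # non-standard axis, so x is a DenseAxisArray
      Array(x)                  # copy to a plain Vector
      @variable(model, y)
      @constraint(model, y + 1 in MOI.Integer())  # function-in-set constraint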

    Fixed

    • Fixed a performance issue when show was called on a SparseAxisArray with a large number of elements.
    • Fixed a bug displaying barrier and simplex iterations in solution_summary.
    • Fixed a bug by implementing hash for DenseAxisArray and SparseAxisArray.
    • Names are now only set if the solver supports them. Previously, this prevented solvers such as Ipopt from being used with direct_model.
    • MutableArithmetics.Zero is converted into a 0.0 before being returned to the user. Previously, some calls to @expression would return the undocumented MutableArithmetics.Zero() object. One example is summing over an empty set @expression(model, sum(x[i] for i in 1:0)). You will now get 0.0 instead.
    • AffExpr and QuadExpr can now be used with == 0 instead of iszero. This fixes a number of issues relating to Julia standard libraries such as LinearAlgebra and SparseArrays.
    • Fixed a bug when registering a user-defined function with splatting.

    Other

    • The documentation is now available as a PDF.
    • The documentation now includes a full copy of the MathOptInterface documentation to make it easy to link concepts between the docs. (The MathOptInterface documentation has also been significantly improved.)
    • The documentation contains a large number of improvements and clarifications on a range of topics. Thanks to @sshin23, @DilumAluthge, and @jlwether.
    • The documentation is now built with Julia 1.6 instead of 1.0.
    • Various error messages have been improved to be more readable.

    Version 0.21.10 (September 4, 2021)

    Added

    • Added add_NL_expression
    • add_NL_xxx functions now support AffExpr and QuadExpr as terms

    Fixed

    • Fixed a bug in solution_summary
    • Fixed a bug in relax_integrality

    Other

    • Improved error message in lp_sensitivity_report

    Version 0.21.9 (August 1, 2021)

    Added

    • Containers now support arbitrary container types by passing the type to the container keyword and overloading Containers.container.
    • is_valid now supports nonlinear constraints
    • Added unsafe_backend for querying the inner-most optimizer of a JuMP model.
    • Nonlinear parameters now support the plural @NLparameters macro.
    • Containers (for example, DenseAxisArray) can now be used in vector-valued constraints.

    Other

    • Various improvements to the documentation.

    Version 0.21.8 (May 8, 2021)

    Added

    • The @constraint macro is now extendable in the same way as @variable.
    • AffExpr and QuadExpr can now be used in nonlinear macros.

    Fixed

    • Fixed a bug in lp_sensitivity_report.
    • Fixed an inference issue when creating empty SparseAxisArrays.

    Version 0.21.7 (April 12, 2021)

    Added

    • Added primal_feasibility_report, which can be used to check whether a primal point satisfies primal feasibility.
    • Added coefficient, which returns the coefficient associated with a variable in affine and quadratic expressions.
    • Added copy_conflict, which returns the IIS of an infeasible model.
    • Added solution_summary, which returns (and prints) a struct containing a summary of the solution.
    • Allow AbstractVector in vector constraints instead of just Vector.
    • Added latex_formulation(model) which returns an object representing the latex formulation of a model. Use print(latex_formulation(model)) to print the formulation as a string.
    • User-defined functions in nonlinear expressions are now automatically registered to aid quick model prototyping. However, a warning is printed to encourage manual registration.
    • DenseAxisArrays now support broadcasting over multiple arrays.
    • Container indices can now be iterators of Base.SizeUnknown.
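
    A minimal sketch of latex_formulation from the list above:

      using JuMP

      model = Model()
      @variable(model, x >= 0)
      @objective(model, Min, 2x)
      latex_formulation(model)         # an object wrapping the LaTeX formulation
      print(latex_formulation(model))  # print the formulation as a string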

    Fixed

    • Fixed bug in rad2deg and deg2rad in nonlinear expressions.
    • Fixed a MethodError bug in Containers when forcing container type.
    • Allow partial slicing of a DenseAxisArray, resolving an issue from 2014.
    • Fixed a bug printing variable names in IJulia.
    • Ending an IJulia cell with model now prints a summary of the model (like in the REPL) not the latex formulation. Use print(model) to print the latex formulation.
    • Fixed a bug when copying models containing nested arrays.

    Other

    • Tutorials are now part of the documentation, and more refactoring has taken place.
    • Added JuliaFormatter as a code formatter.
    • Added some precompilation statements to reduce initial latency.
    • Various improvements to error messages to make them more helpful.
    • Improved performance of value(::NonlinearExpression).
    • Improved performance of fix(::VariableRef).

    Version 0.21.6 (January 29, 2021)

    Added

    • Added support for skew symmetric variables via @variable(model, X[1:2, 1:2] in SkewSymmetricMatrixSpace()).
    • lp_sensitivity_report has been added which significantly improves the performance of querying the sensitivity summary of an LP. lp_objective_perturbation_range and lp_rhs_perturbation_range are deprecated.
    • Dual warm-starts are now supported with set_dual_start_value and dual_start_value.
    • ∈ (\in<tab>) can now be used in macros instead of = or in.
    • Use haskey(model::Model, key::Symbol) to check if a name key is registered in a model.
    • Added unregister(model::Model, key::Symbol) to unregister a name key from model.
    • Added callback_node_status for use in callbacks.
    • Added print_bridge_graph to visualize the bridging graph generated by MathOptInterface.
    • Improved error message for containers with duplicate indices.
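
    A minimal sketch of the name-registry helpers listed above:

      using JuMP

      model = Model()
      @variable(model, x)
      haskey(model, :x)      # true: the name x is registered in the model
      unregister(model, :x)  # forget the name so it can be reused in a later macro
      haskey(model, :x)      # false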

    Fixed

    • Various fixes to pass tests on Julia 1.6.
    • Fixed a bug in the printing of nonlinear expressions in IJulia.
    • Fixed a bug when nonlinear expressions are passed to user-defined functions.
    • Some internal functions that were previously exported are now no longer exported.
    • Fixed a bug when relaxing a fixed binary variable.
    • Fixed a StackOverflowError that occurred when SparseAxisArrays had a large number of elements.
    • Removed an unnecessary type assertion in list_of_constraint_types.
    • Fixed a bug when copying models with registered expressions.

    Other

    • The documentation has been significantly overhauled. It now has distinct sections for the manual, API reference, and examples. The existing examples in /examples have now been moved to /docs/src/examples and rewritten using Literate.jl, and they are now included in the documentation.
    • JuliaFormatter has been applied to most of the codebase. This will continue to roll out over time, as we fix upstream issues in the formatter, and will eventually become compulsory.
    • The root cause of a large number of method invalidations has been resolved.
    • We switched continuous integration from Travis and Appveyor to GitHub Actions.

    Version 0.21.5 (September 18, 2020)

    Fixed

    • Fixed deprecation warnings
    • Throw DimensionMismatch for incompatibly sized functions and sets
    • Unify treatment of keys(x) on JuMP containers

    Version 0.21.4 (September 14, 2020)

    Added

    • Add debug info when adding unsupported constraints
    • Add relax_integrality for solving continuous relaxation
    • Allow querying constraint conflicts

    Fixed

    • Dispatch on Real for MOI.submit
    • Implement copy for CustomSet in tests
    • Don't export private macros
    • Fix invalid assertion in nonlinear
    • Error if constraint has NaN right-hand side
    • Improve speed of tests
    • Lots of work modularizing files in /test
    • Improve line numbers in macro error messages
    • Print nonlinear subexpressions
    • Various documentation updates
    • Dependency updates:
      • DataStructures 0.18
      • MathOptFormat v0.5
      • Prep for MathOptInterface 0.9.15

    Version 0.21.3 (June 18, 2020)

    • Added Special Ordered Sets (SOS1 and SOS2) to JuMP with default weights to ease the creation of such constraints (#2212).
    • Added functions simplex_iterations, barrier_iterations and node_count (#2201).
    • Added function reduced_cost (#2205).
    • Implemented callback_value for affine and quadratic expressions (#2231).
    • Support MutableArithmetics.Zero in objective and constraints (#2219).
    • Documentation improvements:
      • Mention tutorials in the docs (#2223).
      • Update COIN-OR links (#2242).
      • Explicit link to the documentation of MOI.FileFormats (#2253).
      • Typo fixes (#2261).
    • Containers improvements:
      • Fix Base.map for DenseAxisArray (#2235).
      • Throw BoundsError if number of indices is incorrect for DenseAxisArray and SparseAxisArray (#2240).
    • Extensibility improvements:
      • Implement a set_objective method fallback that redirects to set_objective_sense and set_objective_function (#2247).
      • Add parse_constraint method with arbitrary number of arguments (#2051).
      • Add parse_constraint_expr and parse_constraint_head (#2228).

    Version 0.21.2 (April 2, 2020)

    • Added relative_gap() to access MOI.RelativeGap() attribute (#2199).
    • Documentation fixes:
      • Added link to source for docstrings in the documentation (#2207).
      • Added docstring for @variables macro (#2216).
      • Typo fixes (#2177, #2184, #2182).
    • Implementation of methods for Base functions:
      • Implemented Base.empty! for JuMP.Model (#2198).
      • Implemented Base.conj for JuMP scalar types (#2209).

    Fixed

    • Fixed sum of expression with scalar product in macro (#2178).
    • Fixed writing of nonlinear models to MathOptFormat (#2181).
    • Fixed construction of empty SparseAxisArray (#2179).
    • Fixed constraint with zero function (#2188).

    Version 0.21.1 (Feb 18, 2020)

    • Improved the clarity of the with_optimizer deprecation warning.

    Version 0.21.0 (Feb 16, 2020)

    Breaking

    • Deprecated with_optimizer (#2090, #2084, #2141). You can replace with_optimizer by either nothing, optimizer_with_attributes or a closure:

      • replace with_optimizer(Ipopt.Optimizer) by Ipopt.Optimizer.
      • replace with_optimizer(Ipopt.Optimizer, max_cpu_time=60.0) by optimizer_with_attributes(Ipopt.Optimizer, "max_cpu_time" => 60.0).
      • replace with_optimizer(Gurobi.Optimizer, env) by () -> Gurobi.Optimizer(env).
      • replace with_optimizer(Gurobi.Optimizer, env, Presolve=0) by optimizer_with_attributes(() -> Gurobi.Optimizer(env), "Presolve" => 0).

      As an alternative to optimizer_with_attributes, you can also set the attributes separately with set_optimizer_attribute, for example:
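
      A minimal sketch of the two equivalent replacements; Ipopt and its max_cpu_time option are taken from the list above:

        using JuMP, Ipopt

        # Attributes passed up front:
        model = Model(optimizer_with_attributes(Ipopt.Optimizer, "max_cpu_time" => 60.0))

        # Or attach the optimizer first and set attributes afterwards:
        model = Model(Ipopt.Optimizer)
        set_optimizer_attribute(model, "max_cpu_time", 60.0)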

    • Renamed set_parameter and set_parameters to set_optimizer_attribute and set_optimizer_attributes (#2150).

    • Broadcast should now be explicit inside macros. @SDconstraint(model, x >= 1) and @constraint(model, x + 1 in SecondOrderCone()) now throw an error instead of broadcasting 1 along the dimension of x (#2107).

    • @SDconstraint(model, x >= 0) is now equivalent to @constraint(model, x in PSDCone()) instead of @constraint(model, (x .- 0) in PSDCone()) (#2107).

    • The macros now create the containers with map instead of for loops, as a consequence, containers created by @expression can now have any element type and containers of constraint references now have concrete element types when possible. This fixes a long-standing issue where @expression could only be used to generate a collection of linear expressions. Now it works for quadratic expressions as well (#2070).

    • Calling deepcopy(::AbstractModel) now throws an error.

    • The constraint name is now printed in the model string (#2108).
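
    A minimal sketch of the @expression change noted above: the container can now hold quadratic expressions, which previously failed:

      using JuMP

      model = Model()
      @variable(model, x[1:3])
      @expression(model, quad[i = 1:3], x[i]^2 + 2 * x[i])  # a Vector of QuadExpr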

    Added

    • Added support for solver-independent and solver-specific callbacks (#2101).
    • Added write_to_file and read_from_file, supported formats are CBF, LP, MathOptFormat, MPS and SDPA (#2114).
    • Added support for complementarity constraints (#2132).
    • Added support for indicator constraints (#2092).
    • Added support for querying multiple solutions with the result keyword (#2100).
    • Added support for constraining variables on creation (#2128).
    • Added method delete that deletes a vector of variables at once if it is supported by the underlying solver (#2135).
    • The arithmetic between JuMP expressions has been refactored into the MutableArithmetics package (#2107).
    • Improved error on complex values in NLP (#1978).
    • Added an example of column generation (#2010).

    Fixed

    • Fixed incorrect coefficients generated when using Symmetric variables (#2102)

    Version 0.20.1 (Oct 18, 2019)

    • Add sections on @variables and @constraints in the documentation (#2062).
    • Fixed product of sparse matrices for Julia v1.3 (#2063).
    • Added set_objective_coefficient to modify the coefficient of a linear term of the objective function (#2008).
    • Added set_time_limit_sec, unset_time_limit_sec and time_limit_sec to set and query the time limit for the solver in seconds (#2053).
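
    A minimal sketch of the time-limit helpers; GLPK is only a placeholder solver, and the model constructor uses the v0.20-era with_optimizer syntax:

      using JuMP, GLPK

      model = Model(with_optimizer(GLPK.Optimizer))  # later versions use Model(GLPK.Optimizer)
      set_time_limit_sec(model, 60.0)
      time_limit_sec(model)        # 60.0
      unset_time_limit_sec(model)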

    Version 0.20.0 (Aug 24, 2019)

    • Documentation updates.
    • Numerous bug fixes.
    • Better error messages (#1977, #1978, #1997, #2017).
    • Performance improvements (#1947, #2032).
    • Added LP sensitivity summary functions lp_objective_perturbation_range and lp_rhs_perturbation_range (#1917).
    • Added functions dual_objective_value, raw_status and set_parameter.
    • Added function set_objective_coefficient to modify the coefficient of a linear term of the objective (#2008).
    • Added functions set_normalized_rhs, normalized_rhs, and add_to_function_constant to modify and get the constant part of a constraint (#1935, #1960).
    • Added functions set_normalized_coefficient and normalized_coefficient to modify and get the coefficient of a linear term of a constraint (#1935, #1960).
    • Numerous other improvements in MOI 0.9, see the NEWS.md file of MOI for more details.

    Version 0.19.2 (June 8, 2019)

    • Fix a bug in derivatives that could arise in models with nested nonlinear subexpressions.

    Version 0.19.1 (May 12, 2019)

    • Usability and performance improvements.
    • Bug fixes.

    Version 0.19.0 (February 15, 2019)

    JuMP 0.19 contains significant breaking changes.

    Breaking

    • JuMP's abstraction layer for communicating with solvers changed from MathProgBase (MPB) to MathOptInterface (MOI). MOI addresses many longstanding design issues. (See @mlubin's slides from JuMP-dev 2018.) JuMP 0.19 is compatible only with solvers that have been updated for MOI. See the installation guide for a list of solvers that have and have not yet been updated.

    • Most solvers have been renamed to PackageName.Optimizer. For example, GurobiSolver() is now Gurobi.Optimizer.

    • Solvers are no longer added to a model via Model(solver = XXX(kwargs...)). Instead use Model(with_optimizer(XXX, kwargs...)). For example, Model(with_optimizer(Gurobi.Optimizer, OutputFlag=0)).

    • JuMP containers (for example, the objects returned by @variable) have been redesigned. Containers.SparseAxisArray replaces JuMPDict, JuMPArray was rewritten (inspired by AxisArrays) and renamed Containers.DenseAxisArray, and you can now request a container type with the container= keyword to the macros. See the corresponding documentation for more details.

    • The statuses returned by solvers have changed. See the possible status values here. The MOI statuses are much richer than the MPB statuses and can be used to distinguish between previously indistinguishable cases (for example, did the solver have a feasible solution when it stopped because of the time limit?).

    • Starting values are separate from result values. Use value to query the value of a variable in a solution. Use start_value and set_start_value to get and set an initial starting point provided to the solver. The solutions from previous solves are no longer automatically set as the starting points for the next solve.

    • The data structures for affine and quadratic expressions AffExpr and QuadExpr have changed. Internally, terms are stored in dictionaries instead of lists. Duplicate coefficients can no longer exist. Accessors and iteration methods have changed.

    • JuMPNLPEvaluator no longer includes the linear and quadratic parts of the model in the evaluation calls. These are now handled separately to allow NLP solvers that support various types of constraints.

    • JuMP solver-independent callbacks have been replaced by solver-specific callbacks. See your favorite solver for more details. (See the note below: No solver-specific callbacks are implemented yet.)

    • The norm() syntax is no longer recognized inside macros. Use the SecondOrderCone() set instead.

    • JuMP no longer performs automatic transformation between special quadratic forms and second-order cone constraints. Support for these constraint classes depends on the solver.

    • The symbols :Min and :Max are no longer used as optimization senses. Instead, JuMP uses the OptimizationSense enum from MathOptInterface. @objective(model, Max, ...), @objective(model, Min, ...), @NLobjective(model, Max, ...), and @NLobjective(model, Min, ...) remain valid, but @objective(m, :Max, ...) is no longer accepted.

    • The sign conventions for duals have changed in some cases for consistency with conic duality (see the documentation). The shadow_price helper method returns duals with signs that match conventional LP interpretations of dual values as sensitivities of the objective value to relaxations of constraints.

    • @constraintref is no longer defined. Instead, create the appropriate container to hold constraint references manually. For example,

      constraints = Dict() # Optionally, specify types for improved performance.
      +new_b = backend(model)
    • All usages of @SDconstraint are deprecated. The new syntax is @constraint(model, X >= Y, PSDCone()).
    • Creating a DenseAxisArray with a Number as an axis will now display a warning. This catches a common error in which users write @variable(model, x[length(S)]) instead of @variable(model, x[1:length(S)]).
    • The caching_mode argument to Model, for example, Model(caching_mode = MOIU.MANUAL) mode has been removed. For more control over the optimizer, use direct_model instead.
    • The previously deprecated lp_objective_perturbation_range and lp_rhs_perturbation_range functions have been removed. Use lp_sensitivity_report instead.
    • The .m fields of NonlinearExpression and NonlinearParameter have been renamed to .model.
    • Infinite variable bounds are now ignored. Thus, @variable(model, x <= Inf) will show has_upper_bound(x) == false. Previously, these bounds were passed through to the solvers which caused numerical issues for solvers expecting finite bounds.
    • The variable_type and constraint_type functions were removed. This should only affect users who previously wrote JuMP extensions. The functions can be deleted without consequence.
    • The internal functions moi_mode, moi_bridge_constraints, moi_add_constraint, and moi_add_to_function_constant are no longer exported.
    • The un-used method Containers.generate_container has been deleted.
    • The Containers API has been refactored, and _build_ref_sets is now public as Containers.build_ref_sets.
    • The parse_constraint_ methods for extending @constraint at parse time have been refactored in a breaking way. Consult the Extensions documentation for more details and examples.

    Added

    • The TerminationStatusCode and ResultStatusCode enums are now exported by JuMP. Prefer termination_status(model) == OPTIMAL instead of == MOI.OPTIMAL, although the MOI. prefix way still works.
    • Copy a x::DenseAxisArray to an Array by calling Array(x).
    • NonlinearExpression is now a subtype of AbstractJuMPScalar
    • Constraints such as @constraint(model, x + 1 in MOI.Integer()) are now supported.
    • primal_feasibility_report now accepts a function as the first argument.
    • Scalar variables @variable(model, x[1:2] in MOI.Integer()) creates two variables, both of which are constrained to be in the set MOI.Integer.
    • Conic constraints can now be specified as inequalities under a different partial ordering. So @constraint(model, x - y in MOI.Nonnegatives()) can now be written as @constraint(model, x >= y, MOI.Nonnegatives()).
    • Names are now set for vectorized constraints.

    Fixed

    • Fixed a performance issue when show was called on a SparseAxisArray with a large number of elements.
    • Fixed a bug displaying barrier and simplex iterations in solution_summary.
    • Fixed a bug by implementing hash for DenseAxisArray and SparseAxisArray.
    • Names are now only set if the solver supports them. Previously, this prevented solvers such as Ipopt from being used with direct_model.
    • MutableArithmetics.Zero is converted into a 0.0 before being returned to the user. Previously, some calls to @expression would return the undocumented MutableArithmetics.Zero() object. One example is summing over an empty set @expression(model, sum(x[i] for i in 1:0)). You will now get 0.0 instead.
    • AffExpr and QuadExpr can now be used with == 0 instead of iszero. This fixes a number of issues relating to Julia standard libraries such as LinearAlgebra and SparseArrays.
    • Fixed a bug when registering a user-defined function with splatting.

    Other

    • The documentation is now available as a PDF.
    • The documentation now includes a full copy of the MathOptInterface documentation to make it easy to link concepts between the docs. (The MathOptInterface documentation has also been significantly improved.)
    • The documentation contains a large number of improvements and clarifications on a range of topics. Thanks to @sshin23, @DilumAluthge, and @jlwether.
    • The documentation is now built with Julia 1.6 instead of 1.0.
    • Various error messages have been improved to be more readable.

    Version 0.21.10 (September 4, 2021)

    Added

    • Added add_NL_expression
    • add_NL_xxx functions now support AffExpr and QuadExpr as terms

    Fixed

    • Fixed a bug in solution_summary
    • Fixed a bug in relax_integrality

    Other

    • Improved error message in lp_sensitivity_report

    Version 0.21.9 (August 1, 2021)

    Added

    • Containers now support arbitrary container types by passing the type to the container keyword and overloading Containers.container.
    • is_valid now supports nonlinear constraints
    • Added unsafe_backend for querying the inner-most optimizer of a JuMP model.
    • Nonlinear parameters now support the plural @NLparameters macro.
    • Containers (for example, DenseAxisArray) can now be used in vector-valued constraints.

    Other

    • Various improvements to the documentation.

    Version 0.21.8 (May 8, 2021)

    Added

    • The @constraint macro is now extendable in the same way as @variable.
    • AffExpr and QuadExpr can now be used in nonlinear macros.

    Fixed

    • Fixed a bug in lp_sensitivity_report.
    • Fixed an inference issue when creating empty SparseAxisArrays.

    Version 0.21.7 (April 12, 2021)

    Added

    • Added primal_feasibility_report, which can be used to check whether a primal point satisfies primal feasibility.
    • Added coefficient, which returns the coefficient associated with a variable in affine and quadratic expressions.
    • Added copy_conflict, which returns the IIS of an infeasible model.
    • Added solution_summary, which returns (and prints) a struct containing a summary of the solution.
    • Allow AbstractVector in vector constraints instead of just Vector.
    • Added latex_formulation(model) which returns an object representing the latex formulation of a model. Use print(latex_formulation(model)) to print the formulation as a string.
    • User-defined functions in nonlinear expressions are now automatically registered to aid quick model prototyping. However, a warning is printed to encourage the manual registration.
    • DenseAxisArray's now support broadcasting over multiple arrays.
    • Container indices can now be iterators of Base.SizeUnknown.

    Fixed

    • Fixed bug in rad2deg and deg2rad in nonlinear expressions.
    • Fixed a MethodError bug in Containers when forcing container type.
    • Allow partial slicing of a DenseAxisArray, resolving an issue from 2014.
    • Fixed a bug printing variable names in IJulia.
    • Ending an IJulia cell with model now prints a summary of the model (like in the REPL) not the latex formulation. Use print(model) to print the latex formulation.
    • Fixed a bug when copying models containing nested arrays.

    Other

    • Tutorials are now part of the documentation, and more refactoring has taken place.
    • Added JuliaFormatter added as a code formatter.
    • Added some precompilation statements to reduce initial latency.
    • Various improvements to error messages to make them more helpful.
    • Improved performance of value(::NonlinearExpression).
    • Improved performance of fix(::VariableRef).

    Version 0.21.6 (January 29, 2021)

    Added

    • Added support for skew symmetric variables via @variable(model, X[1:2, 1:2] in SkewSymmetricMatrixSpace()).
    • lp_sensitivity_report has been added which significantly improves the performance of querying the sensitivity summary of an LP. lp_objective_perturbation_range and lp_rhs_perturbation_range are deprecated.
    • Dual warm-starts are now supported with set_dual_start_value and dual_start_value.
    • (\in<tab>) can now be used in macros instead of = or in.
    • Use haskey(model::Model, key::Symbol) to check if a name key is registered in a model.
    • Added unregister(model::Model, key::Symbol) to unregister a name key from model.
    • Added callback_node_status for use in callbacks.
    • Added print_bridge_graph to visualize the bridging graph generated by MathOptInterface.
    • Improved error message for containers with duplicate indices.

    Fixed

    • Various fixes to pass tests on Julia 1.6.
    • Fixed a bug in the printing of nonlinear expressions in IJulia.
    • Fixed a bug when nonlinear expressions are passed to user-defined functions.
    • Some internal functions that were previously exported are now no longer exported.
    • Fixed a bug when relaxing a fixed binary variable.
    • Fixed a StackOverflowError that occurred when SparseAxisArrays had a large number of elements.
    • Removed an unnecessary type assertion in list_of_constraint_types.
    • Fixed a bug when copying models with registered expressions.

    Other

    • The documentation has been significantly overhauled. It now has distinct sections for the manual, API reference, and examples. The existing examples in /examples have now been moved to /docs/src/examples and rewritten using Literate.jl, and they are now included in the documentation.
    • JuliaFormatter has been applied to most of the codebase. This will continue to roll out over time, as we fix upstream issues in the formatter, and will eventually become compulsory.
    • The root cause of a large number of method invalidations has been resolved.
    • We switched continuous integration from Travis and Appveyor to GitHub Actions.

    Version 0.21.5 (September 18, 2020)

    Fixed

    • Fixed deprecation warnings
    • Throw DimensionMismatch for incompatibly sized functions and sets
    • Unify treatment of keys(x) on JuMP containers

    Version 0.21.4 (September 14, 2020)

    Added

    • Add debug info when adding unsupported constraints
    • Add relax_integrality for solving continuous relaxation
    • Allow querying constraint conflicts
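
    A minimal sketch of relax_integrality (solving the relaxation is elided):

      using JuMP
      model = Model()
      @variable(model, x, Int)
      undo = relax_integrality(model)   # drops integrality; returns a function that restores it
      # ... optimize and inspect the continuous relaxation here ...
      undo()                            # restore the original integrality constraints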

    Fixed

    • Dispatch on Real for MOI.submit
    • Implement copy for CustomSet in tests
    • Don't export private macros
    • Fix invalid assertion in nonlinear
    • Error if constraint has NaN right-hand side
    • Improve speed of tests
    • Lots of work modularizing files in /test
    • Improve line numbers in macro error messages
    • Print nonlinear subexpressions
    • Various documentation updates
    • Dependency updates:
      • DataStructures 0.18
      • MathOptFormat v0.5
      • Prep for MathOptInterface 0.9.15

    Version 0.21.3 (June 18, 2020)

    • Added Special Ordered Sets (SOS1 and SOS2) to JuMP with default weights to ease the creation of such constraints (#2212).
    • Added functions simplex_iterations, barrier_iterations and node_count (#2201).
    • Added function reduced_cost (#2205).
    • Implemented callback_value for affine and quadratic expressions (#2231).
    • Support MutableArithmetics.Zero in objective and constraints (#2219).
    • Documentation improvements:
      • Mention tutorials in the docs (#2223).
      • Update COIN-OR links (#2242).
      • Explicit link to the documentation of MOI.FileFormats (#2253).
      • Typo fixes (#2261).
    • Containers improvements:
      • Fix Base.map for DenseAxisArray (#2235).
      • Throw BoundsError if number of indices is incorrect for DenseAxisArray and SparseAxisArray (#2240).
    • Extensibility improvements:
      • Implement a set_objective method fallback that redirects to set_objective_sense and set_objective_function (#2247).
      • Add parse_constraint method with arbitrary number of arguments (#2051).
      • Add parse_constraint_expr and parse_constraint_head (#2228).
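
    A minimal sketch of the SOS syntax added in this release (weights are illustrative):

      using JuMP
      model = Model()
      @variable(model, x[1:3])
      @constraint(model, x in SOS1())                  # default weights 1.0, 2.0, 3.0
      @constraint(model, x in SOS2([1.0, 3.0, 2.0]))   # explicit weights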

    Version 0.21.2 (April 2, 2020)

    • Added relative_gap() to access MOI.RelativeGap() attribute (#2199).
    • Documentation fixes:
      • Added link to source for docstrings in the documentation (#2207).
      • Added docstring for @variables macro (#2216).
      • Typo fixes (#2177, #2184, #2182).
    • Implementation of methods for Base functions:
      • Implemented Base.empty! for JuMP.Model (#2198).
      • Implemented Base.conj for JuMP scalar types (#2209).

    Fixed

    • Fixed sum of expression with scalar product in macro (#2178).
    • Fixed writing of nonlinear models to MathOptFormat (#2181).
    • Fixed construction of empty SparseAxisArray (#2179).
    • Fixed constraint with zero function (#2188).

    Version 0.21.1 (Feb 18, 2020)

    • Improved the clarity of the with_optimizer deprecation warning.

    Version 0.21.0 (Feb 16, 2020)

    Breaking

    • Deprecated with_optimizer (#2090, #2084, #2141). You can replace with_optimizer by either nothing, optimizer_with_attributes or a closure:

      • replace with_optimizer(Ipopt.Optimizer) by Ipopt.Optimizer.
      • replace with_optimizer(Ipopt.Optimizer, max_cpu_time=60.0) by optimizer_with_attributes(Ipopt.Optimizer, "max_cpu_time" => 60.0).
      • replace with_optimizer(Gurobi.Optimizer, env) by () -> Gurobi.Optimizer(env).
      • replace with_optimizer(Gurobi.Optimizer, env, Presolve=0) by optimizer_with_attributes(() -> Gurobi.Optimizer(env), "Presolve" => 0).

      As an alternative to optimizer_with_attributes, you can also set the attributes separately with set_optimizer_attribute.
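
      For example, the two attribute-setting styles are equivalent (Ipopt as in the examples above):

        using JuMP, Ipopt
        model = Model(optimizer_with_attributes(Ipopt.Optimizer, "max_cpu_time" => 60.0))
        # or, equivalently:
        model = Model(Ipopt.Optimizer)
        set_optimizer_attribute(model, "max_cpu_time", 60.0)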

    • Renamed set_parameter and set_parameters to set_optimizer_attribute and set_optimizer_attributes (#2150).

    • Broadcast should now be explicit inside macros. @SDconstraint(model, x >= 1) and @constraint(model, x + 1 in SecondOrderCone()) now throw an error instead of broadcasting 1 along the dimension of x (#2107).

    • @SDconstraint(model, x >= 0) is now equivalent to @constraint(model, x in PSDCone()) instead of @constraint(model, (x .- 0) in PSDCone()) (#2107).

    • The macros now create the containers with map instead of for loops. As a consequence, containers created by @expression can now have any element type, and containers of constraint references now have concrete element types when possible. This fixes a long-standing issue where @expression could only be used to generate a collection of linear expressions; now it works for quadratic expressions as well (#2070).

    • Calling deepcopy(::AbstractModel) now throws an error.

    • The constraint name is now printed in the model string (#2108).

    Added

    • Added support for solver-independent and solver-specific callbacks (#2101).
    • Added write_to_file and read_from_file, supported formats are CBF, LP, MathOptFormat, MPS and SDPA (#2114).
    • Added support for complementarity constraints (#2132).
    • Added support for indicator constraints (#2092).
    • Added support for querying multiple solutions with the result keyword (#2100).
    • Added support for constraining variables on creation (#2128).
    • Added method delete that deletes a vector of variables at once if it is supported by the underlying solver (#2135).
    • The arithmetic between JuMP expressions has been refactored into the MutableArithmetics package (#2107).
    • Improved error on complex values in NLP (#1978).
    • Added an example of column generation (#2010).
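
    A minimal sketch of querying multiple results (SomeSolver is a placeholder for a solver whose optimizer can return more than one solution):

      using JuMP
      model = Model(SomeSolver.Optimizer)   # SomeSolver is a placeholder, not a real package
      @variable(model, x[1:2], Bin)
      @objective(model, Max, x[1] + x[2])
      optimize!(model)
      for i in 1:result_count(model)
          println(objective_value(model; result = i), " ", value.(x; result = i))
      end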

    Fixed

    • Incorrect coefficients generated when using Symmetric variables (#2102)

    Version 0.20.1 (Oct 18, 2019)

    • Add sections on @variables and @constraints in the documentation (#2062).
    • Fixed product of sparse matrices for Julia v1.3 (#2063).
    • Added set_objective_coefficient to modify the coefficient of a linear term of the objective function (#2008).
    • Added set_time_limit_sec, unset_time_limit_sec and time_limit_sec to set and query the time limit for the solver in seconds (#2053).
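
    A minimal sketch of the new setters (SomeSolver is a placeholder for any solver that supports a time limit):

      using JuMP
      model = Model(SomeSolver.Optimizer)        # SomeSolver is a placeholder
      @variable(model, x)
      @objective(model, Min, x)
      set_time_limit_sec(model, 60.0)
      time_limit_sec(model)                      # 60.0
      set_objective_coefficient(model, x, 2.5)   # the objective becomes 2.5 x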

    Version 0.20.0 (Aug 24, 2019)

    • Documentation updates.
    • Numerous bug fixes.
    • Better error messages (#1977, #1978, #1997, #2017).
    • Performance improvements (#1947, #2032).
    • Added LP sensitivity summary functions lp_objective_perturbation_range and lp_rhs_perturbation_range (#1917).
    • Added functions dual_objective_value, raw_status and set_parameter.
    • Added function set_objective_coefficient to modify the coefficient of a linear term of the objective (#2008).
    • Added functions set_normalized_rhs, normalized_rhs, and add_to_function_constant to modify and get the constant part of a constraint (#1935, #1960).
    • Added functions set_normalized_coefficient and normalized_coefficient to modify and get the coefficient of a linear term of a constraint (#1935, #1960).
    • Numerous other improvements in MOI 0.9, see the NEWS.md file of MOI for more details.
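
    A minimal sketch of the constraint-modification functions above:

      using JuMP
      model = Model()
      @variable(model, x)
      @constraint(model, con, 2x <= 1)
      set_normalized_rhs(con, 3)
      normalized_rhs(con)                        # 3.0
      set_normalized_coefficient(con, x, 4)
      normalized_coefficient(con, x)             # 4.0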

    Version 0.19.2 (June 8, 2019)

    • Fix a bug in derivatives that could arise in models with nested nonlinear subexpressions.

    Version 0.19.1 (May 12, 2019)

    • Usability and performance improvements.
    • Bug fixes.

    Version 0.19.0 (February 15, 2019)

    JuMP 0.19 contains significant breaking changes.

    Breaking

    • JuMP's abstraction layer for communicating with solvers changed from MathProgBase (MPB) to MathOptInterface (MOI). MOI addresses many longstanding design issues. (See @mlubin's slides from JuMP-dev 2018.) JuMP 0.19 is compatible only with solvers that have been updated for MOI. See the installation guide for a list of solvers that have and have not yet been updated.

    • Most solvers have been renamed to PackageName.Optimizer. For example, GurobiSolver() is now Gurobi.Optimizer.

    • Solvers are no longer added to a model via Model(solver = XXX(kwargs...)). Instead use Model(with_optimizer(XXX, kwargs...)). For example, Model(with_optimizer(Gurobi.Optimizer, OutputFlag=0)).

    • JuMP containers (for example, the objects returned by @variable) have been redesigned. Containers.SparseAxisArray replaces JuMPDict, JuMPArray was rewritten (inspired by AxisArrays) and renamed Containers.DenseAxisArray, and you can now request a container type with the container= keyword to the macros. See the corresponding documentation for more details.

    • The statuses returned by solvers have changed. See the possible status values here. The MOI statuses are much richer than the MPB statuses and can be used to distinguish between previously indistinguishable cases (for example, did the solver have a feasible solution when it stopped because of the time limit?).

    • Starting values are separate from result values. Use value to query the value of a variable in a solution. Use start_value and set_start_value to get and set an initial starting point provided to the solver. The solutions from previous solves are no longer automatically set as the starting points for the next solve.

    • The data structures for affine and quadratic expressions AffExpr and QuadExpr have changed. Internally, terms are stored in dictionaries instead of lists. Duplicate coefficients can no longer exist. Accessors and iteration methods have changed.

    • JuMPNLPEvaluator no longer includes the linear and quadratic parts of the model in the evaluation calls. These are now handled separately to allow NLP solvers that support various types of constraints.

    • JuMP solver-independent callbacks have been replaced by solver-specific callbacks. See your favorite solver for more details. (See the note below: No solver-specific callbacks are implemented yet.)

    • The norm() syntax is no longer recognized inside macros. Use the SecondOrderCone() set instead.

    • JuMP no longer performs automatic transformation between special quadratic forms and second-order cone constraints. Support for these constraint classes depends on the solver.

    • The symbols :Min and :Max are no longer used as optimization senses. Instead, JuMP uses the OptimizationSense enum from MathOptInterface. @objective(model, Max, ...), @objective(model, Min, ...), @NLobjective(model, Max, ...), and @NLobjective(model, Min, ...) remain valid, but @objective(m, :Max, ...) is no longer accepted.

    • The sign conventions for duals have changed in some cases for consistency with conic duality (see the documentation). The shadow_price helper method returns duals with signs that match conventional LP interpretations of dual values as sensitivities of the objective value to relaxations of constraints.

    • @constraintref is no longer defined. Instead, create the appropriate container to hold constraint references manually. For example,

      constraints = Dict() # Optionally, specify types for improved performance.
      for i in 1:N
          constraints[i] = @constraint(model, ...)
      end
    • The lowerbound, upperbound, and basename keyword arguments to the @variable macro have been renamed to lower_bound, upper_bound, and base_name, for consistency with JuMP's new style recommendations.

    • We rely on broadcasting syntax to apply accessors to collections of variables, for example, value.(x) instead of getvalue(x) for collections. (Use value(x) when x is a scalar object.)
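
    For example, a second-order cone constraint that was previously written with norm() is now stated explicitly (a minimal sketch):

      using JuMP
      model = Model()
      @variable(model, t)
      @variable(model, x[1:3])
      # ||x||_2 <= t, previously written as norm(x) <= t inside @constraint
      @constraint(model, [t; x] in SecondOrderCone())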

    Added

    • Splatting (like f(x...)) is recognized in restricted settings in nonlinear expressions.

    • Support for deleting constraints and variables.

    • The documentation has been completely rewritten using docstrings and Documenter.

    • Support for modeling mixed conic and quadratic models (for example, conic models with quadratic objectives and bi-linear matrix inequalities).

    • Significantly improved support for modeling new types of constraints and for extending JuMP's macros.

    • Support for providing dual warm starts.

    • Improved support for accessing solver-specific attributes (for example, the irreducible inconsistent subsystem).

    • Explicit control of whether symmetry-enforcing constraints are added to PSD constraints.

    • Support for modeling exponential cones.

    • Significant improvements in internal code quality and testing.

    • Style and naming guidelines.

    • Direct mode and manual mode provide explicit control over when copies of a model are stored or regenerated. See the corresponding documentation.
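
    A minimal sketch of direct mode (SomeSolver is a placeholder for an MOI-compatible solver package):

      using JuMP
      model = direct_model(SomeSolver.Optimizer())   # no copy of the model is cached by JuMP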

    Regressions

    There are known regressions from JuMP 0.18 that will be addressed in a future release (0.19.x or later):

    • Performance regressions in model generation (issue). Please file an issue anyway if you notice a significant performance regression. We have plans to address a number of performance issues, but we might not be aware of all of them.

    • Fast incremental NLP solves are not yet reimplemented (issue).

    • We do not yet have an implementation of solver-specific callbacks.

    • The column generation syntax in @variable has been removed (that is, the objective, coefficients, and inconstraints keyword arguments). Support for column generation will be re-introduced in a future release.

    • The ability to solve the continuous relaxation (that is, via solve(model; relaxation = true)) is not yet reimplemented (issue).

    Version 0.18.5 (December 1, 2018)

    • Support views in some derivative evaluation functions.
    • Improved compatibility with PackageCompiler.

    Version 0.18.4 (October 8, 2018)

    • Fix a bug in model printing on Julia 0.7 and 1.0.

    Version 0.18.3 (October 1, 2018)

    • Add support for Julia v1.0 (Thanks @ExpandingMan)
    • Fix matrix expressions with quadratic functions (#1508)

    Version 0.18.2 (June 10, 2018)

    • Fix a bug in second-order derivatives when expressions are present (#1319)
    • Fix a bug in @constraintref (#1330)

    Version 0.18.1 (April 9, 2018)

    • Fix for nested tuple destructuring (#1193)
    • Preserve internal model when relaxation=true (#1209)
    • Minor bug fixes and updates for examples

    Version 0.18.0 (July 27, 2017)

    • Drop support for Julia 0.5.
    • Update for ForwardDiff 0.5.
    • Minor bug fixes.

    Version 0.17.1 (June 9, 2017)

    • Use of constructconstraint! in @SDconstraint.
    • Minor bug fixes.

    Version 0.17.0 (May 27, 2017)

    • Breaking change: Mixing quadratic and conic constraints is no longer supported.
    • Breaking change: The getvariable and getconstraint functions are replaced by indexing on the corresponding symbol. For instance, to access the variable with name x, one should now write m[:x] instead of getvariable(m, :x). As a consequence, creating a variable and constraint with the same name now triggers a warning, and accessing one of them afterwards throws an error. This change is breaking only in the latter case.
    • Addition of the getobjectivebound function that mirrors the functionality of the MathProgBase getobjbound function except that it takes into account transformations performed by JuMP.
    • Minor bug fixes.

    The following changes are primarily of interest to developers of JuMP extensions:

    • The new syntax @constraint(model, expr in Cone) creates the constraint ensuring that expr is inside Cone. The Cone argument is passed to constructconstraint!, which enables the call to be dispatched to an extension.
    • The @variable macro now calls constructvariable! instead of directly calling the Variable constructor. Extra arguments and keyword arguments passed to @variable are passed to constructvariable! which enables the call to be dispatched to an extension.
    • Refactor the internal function conicdata (used to build the MathProgBase conic model) into smaller sub-functions to make these parts reusable by extensions.

    Version 0.16.2 (March 28, 2017)

    • Minor bug fixes and printing tweaks
    • Address deprecation warnings for Julia 0.6

    Version 0.16.1 (March 7, 2017)

    • Better support for AbstractArray in JuMP (Thanks @tkoolen)
    • Minor bug fixes

    Version 0.16.0 (February 23, 2017)

    • Breaking change: JuMP no longer has a mechanism for selecting solvers by default (the previous mechanism was flawed and incompatible with Julia 0.6). Not specifying a solver before calling solve() will result in an error.
    • Breaking change: User-defined functions are no longer global. The first argument to JuMP.register is now a JuMP Model object within whose scope the function will be registered. Calling JuMP.register without a Model now produces an error.
    • Breaking change: Use the new JuMP.fix method to fix a variable to a value or to update the value to which a variable is fixed. Calling setvalue on a fixed variable now results in an error in order to avoid silent behavior changes. (Thanks @joaquimg)
    • Nonlinear expressions now print out similarly to linear/quadratic expressions (useful for debugging!)
    • New category keyword to @variable. Used for specifying categories of anonymous variables.
    • Compatibility with Julia 0.6-dev.
    • Minor fixes and improvements (Thanks @cossio, @ccoffrin, @blegat)

    Version 0.15.1 (January 31, 2017)

    • Bugfix for @LinearConstraints and friends

    Version 0.15.0 (December 22, 2016)

    • Julia 0.5.0 is the minimum required version for this release.
    • Document support for BARON solver
    • Enable info callbacks in more states than before, for example, for recording solutions. New when argument to addinfocallback (#814, thanks @yeesian)
    • Improved support for anonymous variables. This includes new warnings for potentially confusing use of the traditional non-anonymous syntax:
      • When multiple variables in a model are given the same name
      • When non-symbols are used as names, for example, @variable(m, x[1][1:N])
    • Improvements in iterating over JuMP containers (#836, thanks @IssamT)
    • Support for writing variable names in .lp file output (Thanks @leethargo)
    • Support for querying duals to SDP problems (Thanks @blegat)
    • The comprehension syntax with curly braces sum{}, prod{}, and norm2{} has been deprecated in favor of Julia's native comprehension syntax sum(), prod() and norm() as previously announced. (For early adopters of the new syntax, norm2() was renamed to norm() without deprecation.)
    • Unit tests rewritten to use Base.Test instead of FactCheck
    • Improved support for operations with matrices of JuMP types (Thanks @ExpandingMan)
    • The syntax to halt a solver from inside a callback has changed from throw(CallbackAbort()) to return JuMP.StopTheSolver
    • Minor bug fixes

    Version 0.14.2 (December 12, 2016)

    • Allow singleton anonymous variables (includes bugfix)

    Version 0.14.1 (September 12, 2016)

    • More consistent handling of states in informational callbacks, includes a new when parameter to addinfocallback for specifying in which state an informational callback should be called.

    Version 0.14.0 (August 7, 2016)

    • Compatibility with Julia 0.5 and ForwardDiff 0.2
    • Support for "anonymous" variables, constraints, expressions, and parameters, for example, x = @variable(m, [1:N]) instead of @variable(m, x[1:N])
    • Support for retrieving constraints from a model by name via getconstraint
    • @NLconstraint now returns constraint references (as expected).
    • Support for vectorized expressions within lazy constraints
    • On Julia 0.5, parse new comprehension syntax sum(x[i] for i in 1:N if isodd(i)) instead of sum{ x[i], i in 1:N; isodd(i) }. The old syntax with curly braces will be deprecated in JuMP 0.15.
    • Now possible to provide nonlinear expressions as "raw" Julia Expr objects instead of using JuMP's nonlinear macros. This input format is useful for programmatically generated expressions.
    • s/Mathematical Programming/Mathematical Optimization/
    • Support for local cuts (Thanks to @madanim, Mehdi Madani)
    • Document Xpress interface developed by @joaquimg, Joaquim Dias Garcia
    • Minor bug and deprecation fixes (Thanks @odow, @jrevels)

    Version 0.13.2 (May 16, 2016)

    • Compatibility update for MathProgBase

    Version 0.13.1 (May 3, 2016)

    • Fix broken deprecation for registerNLfunction.

    Version 0.13.0 (April 29, 2016)

    • Most exported methods and macros have been renamed to avoid camelCase. See the list of changes here. There is a 1-1 mapping from the old names to the new, and it is safe to simply replace the names to update existing models.
    • Specify variable lower/upper bounds in @variable using the lowerbound and upperbound keyword arguments.
    • Change name printed for variable using the basename keyword argument to @variable.
    • New @variables macro allows multi-line declaration of groups of variables.
    • A number of solver methods previously available only through MathProgBase are now exposed directly in JuMP. The fix was recorded live.
    • Compatibility fixes with Julia 0.5.
    • The "end" indexing syntax is no longer supported within JuMPArrays which do not use 1-based indexing until upstream issues are resolved, see here.

    Version 0.12.2 (March 9, 2016)

    • Small fixes for nonlinear optimization

    Version 0.12.1 (March 1, 2016)

    • Fix a regression in slicing for JuMPArrays (when not using 1-based indexing)

    Version 0.12.0 (February 27, 2016)

    • The automatic differentiation functionality has been completely rewritten with a number of user-facing changes:
      • @defExpr and @defNLExpr now take the model as the first argument. The previous one-argument version of @defExpr is deprecated; all expressions should be named. For example, replace @defExpr(2x+y) with @defExpr(jump_model, my_expr, 2x+y).
      • JuMP no longer uses Julia's variable binding rules for efficiently re-solving a sequence of nonlinear models. Instead, we have introduced nonlinear parameters. This is a breaking change, so we have added a warning message when we detect models that may depend on the old behavior.
      • Support for user-defined functions integrated within nonlinear JuMP expressions.
    • Replaced iteration over AffExpr with Number-like scalar iteration; previous iteration behavior is now available via linearterms(::AffExpr).
    • Stopping the solver via throw(CallbackAbort()) from a callback no longer triggers an exception. Instead, solve() returns UserLimit status.
    • getDual() now works for conic problems (Thanks @emreyamangil.)

    Version 0.11.3 (February 4, 2016)

    • Bug-fix for problems with quadratic objectives and semidefinite constraints

    Version 0.11.2 (January 14, 2016)

    • Compatibility update for Mosek

    Version 0.11.1 (December 1, 2015)

    • Remove usage of @compat in tests.
    • Fix updating quadratic objectives for nonlinear models.

    Version 0.11.0 (November 30, 2015)

    • Julia 0.4.0 is the minimum required version for this release.
    • Fix for scoping semantics of index variables in sum{}. Index variables no longer leak into the surrounding scope.
    • Addition of the solve(m::Model, relaxation=true) keyword argument to solve the standard continuous relaxation of model m
    • The getConstraintBounds() method allows access to the lower and upper bounds of all constraints in a (nonlinear) model.
    • Update for breaking changes in MathProgBase

    Version 0.10.3 (November 20, 2015)

    • Fix a rare error when parsing quadratic expressions
    • Fix Variable() constructor with default arguments
    • Detect unrecognized keywords in solve()

    Version 0.10.2 (September 28, 2015)

    • Fix for deprecation warnings

    Version 0.10.1 (September 3, 2015)

    • Fixes for ambiguity warnings.
    • Fix for breaking change in precompilation syntax in Julia 0.4-pre

    Version 0.10.0 (August 31, 2015)

    • Support (on Julia 0.4 and later) for conditions in indexing @defVar and @addConstraint constructs, for example, @defVar(m, x[i=1:5,j=1:5; i+j >= 3])
    • Support for vectorized operations on Variables and expressions. See the documentation for details.
    • New getVar() method to access variables in a model by name
    • Support for semidefinite programming.
    • Dual solutions are now available for general nonlinear problems. You may call getDual on a reference object for a nonlinear constraint, and getDual on a variable object for Lagrange multipliers from active bounds.
    • Introduce warnings for two common performance traps: too many calls to getValue() on a collection of variables and use of the + operator in a loop to sum expressions.
    • Second-order cone constraints can be written directly with the norm() and norm2{} syntax.
    • Implement MathProgBase interface for querying Hessian-vector products.
    • Iteration over JuMPContainers is deprecated; instead, use the keys and values functions, and zip(keys(d),values(d)) for the old behavior.
    • @defVar returns Array{Variable,N} when each of N index sets are of the form 1:nᵢ.
    • Module precompilation: on Julia 0.4 and later, using JuMP is now much faster.

    Version 0.9.3 (August 11, 2015)

    • Fixes for FactCheck testing on julia v0.4.

    Version 0.9.2 (June 27, 2015)

    • Fix bug in @addConstraints.

    Version 0.9.1 (April 25, 2015)

    • Fix for Julia 0.4-dev.
    • Small infrastructure improvements for extensions.

    Version 0.9.0 (April 18, 2015)

    • Comparison operators for constructing constraints (for example, 2x >= 1) have been deprecated. Instead, construct the constraints explicitly in the @addConstraint macro to add them to the model, or in the @LinearConstraint macro to create a stand-alone linear constraint instance.
    • getValue() method implemented to compute the value of a nonlinear subexpression
    • JuMP is now released under the Mozilla Public License version 2.0 (was previously LGPL). MPL is a copyleft license which is less restrictive than LGPL, especially for embedding JuMP within other applications.
    • A number of performance improvements in ReverseDiffSparse for computing derivatives.
    • MathProgBase.getsolvetime(m) now returns the solution time reported by the solver, if available. (Thanks @odow, Oscar Dowson)
    • Formatting fix for LP format output. (Thanks @sbebo, Leonardo Taccari).

    Version 0.8.0 (February 17, 2015)

    • Nonlinear subexpressions now supported with the @defNLExpr macro.
    • SCS supported for solving second-order conic problems.
    • setXXXCallback family deprecated in favor of addXXXCallback.
    • Multiple callbacks of the same type can be registered.
    • Added support for informational callbacks via addInfoCallback.
    • A CallbackAbort exception can be thrown from callback to safely exit optimization.

    Version 0.7.4 (February 4, 2015)

    • Reduced costs and linear constraint duals are now accessible when quadratic constraints are present.
    • Two-sided nonlinear constraints are supported.
    • Methods for accessing the number of variables and constraints in a model are renamed.
    • New default procedure for setting initial values in nonlinear optimization: project zero onto the variable bounds.
    • Small bug fixes.

    Version 0.7.3 (January 14, 2015)

    • Fix a method ambiguity conflict with Compose.jl (cosmetic fix)

    Version 0.7.2 (January 9, 2015)

    • Fix a bug in sum(::JuMPDict)
    • Added the setCategory function to change a variable's category (for example, continuous or binary) after construction, and getCategory to retrieve the variable category.

    Version 0.7.1 (January 2, 2015)

    • Fix a bug in parsing linear expressions in macros. Affects only Julia 0.4 and later.

    Version 0.7.0 (December 29, 2014)

    Linear/quadratic/conic programming

    • Breaking change: The syntax for column-wise model generation has been changed to use keyword arguments in @defVar.
    • On Julia 0.4 and later, variables and coefficients may be multiplied in any order within macros. That is, variable*coefficient is now valid syntax.
    • ECOS supported for solving second-order conic problems.

    Nonlinear programming

    • Support for skipping model generation when solving a sequence of nonlinear models with changing data.
    • Fix a memory leak when solving a sequence of nonlinear models.
    • The @addNLConstraint macro now supports the three-argument version to define sets of nonlinear constraints.
    • KNITRO supported as a nonlinear solver.
    • Speed improvements for model generation.
    • The @addNLConstraints macro supports adding multiple (groups of) constraints at once. Syntax is similar to @addConstraints.
    • Discrete variables allowed in nonlinear problems for solvers which support them (currently only KNITRO).

    General

    • Starting values for variables may now be specified with @defVar(m, x, start=value).
    • The setSolver function allows users to change the solver subsequent to model creation.
    • Support for "fixed" variables via the @defVar(m, x == 1) syntax.
    • Unit tests rewritten to use FactCheck.jl, improved testing across solvers.

    Version 0.6.3 (October 19, 2014)

    • Fix a bug in multiplying two AffExpr objects.

    Version 0.6.2 (October 11, 2014)

    • Further improvements and bug fixes for printing.
    • Fixed a bug in @defExpr.
    • Support for accessing expression graphs through the MathProgBase NLP interface.

    Version 0.6.1 (September 19, 2014)

    • Improvements and bug fixes for printing.

    Version 0.6.0 (September 9, 2014)

    • Julia 0.3.0 is the minimum required version for this release.
    • buildInternalModel(m::Model) added to build solver-level model in memory without optimizing.
    • Deprecate load_model_only keyword argument to solve.
    • Add groups of constraints with @addConstraints macro.
    • Unicode operators now supported, including ∑ for sum, ∏ for prod, and √ for sqrt
    • Quadratic constraints supported in @addConstraint macro.
    • Quadratic objectives supported in @setObjective macro.
    • MathProgBase solver-independent interface replaces Ipopt-specific interface for nonlinear problems
      • Breaking change: IpoptOptions no longer supported to specify solver options, use m = Model(solver=IpoptSolver(options...)) instead.
    • New solver interfaces: ECOS, NLopt, and nonlinear support for MOSEK
    • New option to control whether the lazy constraint callback is executed at each node in the B&B tree or just when feasible solutions are found
    • Add support for semicontinuous and semi-integer variables for those solvers that support them.
    • Add support for index dependencies (for example, triangular indexing) in @defVar, @addConstraint, and @defExpr (for example, @defVar(m, x[i=1:10,j=i:10])).
      • This required some changes to the internal structure of JuMP containers, which may break code that explicitly stored JuMPDict objects.

    Version 0.5.8 (September 24, 2014)

    • Fix a bug with specifying solvers (affects Julia 0.2 only)

    Version 0.5.7 (September 5, 2014)

    • Fix a bug in printing models

    Version 0.5.6 (September 2, 2014)

    • Add support for semicontinuous and semi-integer variables for those solvers that support them.
      • Breaking change: Syntax for Variable() constructor has changed (use of this interface remains discouraged)
    • Update for breaking changes in MathProgBase

    Version 0.5.5 (July 6, 2014)

    • Fix bug with problem modification: adding variables that did not appear in existing constraints or objective.

    Version 0.5.4 (June 19, 2014)

    • Update for breaking change in MathProgBase which reduces loading times for using JuMP
    • Fix error when MIPs not solved to optimality

    Version 0.5.3 (May 21, 2014)

    • Update for breaking change in ReverseDiffSparse

    Version 0.5.2 (May 9, 2014)

    • Fix compatibility with Julia 0.3 prerelease

    Version 0.5.1 (May 5, 2014)

    • Fix a bug in coefficient handling inside lazy constraints and user cuts

    Version 0.5.0 (May 2, 2014)

    • Support for nonlinear optimization with exact, sparse second-order derivatives automatically computed. Ipopt is currently the only solver supported.
    • getValue for AffExpr and QuadExpr
    • Breaking change: getSolverModel replaced by getInternalModel, which returns the internal MathProgBase-level model
    • Groups of constraints can be specified with @addConstraint (see documentation for details). This is not a breaking change.
    • dot(::JuMPDict{Variable},::JuMPDict{Variable}) now returns the corresponding quadratic expression.

    Version 0.4.1 (March 24, 2014)

    • Fix bug where change in objective sense was ignored when re-solving a model.
    • Fix issue with handling zero coefficients in AffExpr.

    Version 0.4.0 (March 10, 2014)

    • Support for SOS1 and SOS2 constraints.
    • Solver-independent callback for user heuristics.
    • dot and sum implemented for JuMPDict objects. Now you can say @addConstraint(m, dot(a,x) <= b).
    • Developers: support for extensions to JuMP. See definition of Model in src/JuMP.jl for more details.
    • Option to construct the low-level model before optimizing.

    Version 0.3.2 (February 17, 2014)

    • Improved model printing
      • Preliminary support for IJulia output

    Version 0.3.1 (January 30, 2014)

    • Documentation updates
    • Support for MOSEK
    • CPLEXLink renamed to CPLEX

    Version 0.3.0 (January 21, 2014)

    • Unbounded/infeasibility rays: getValue() will return the corresponding components of an unbounded ray when a model is unbounded, if supported by the selected solver. getDual() will return an infeasibility ray (Farkas proof) if a model is infeasible and the selected solver supports this feature.
    • Solver-independent callbacks for user generated cuts.
    • Use new interface for solver-independent QCQP.
    • setlazycallback renamed to setLazyCallback for consistency.

    Version 0.2.0 (December 15, 2013)

    Breaking

    • Objective sense is specified in setObjective instead of in the Model constructor.
    • lpsolver and mipsolver merged into single solver option.

    Added

    • Problem modification with efficient LP restarts and MIP warm-starts.
    • Relatedly, column-wise modeling now supported.
    • Solver-independent callbacks supported. Currently we support only a "lazy constraint" callback, which works with Gurobi, CPLEX, and GLPK. More callbacks coming soon.

    Version 0.1.2 (November 16, 2013)

    • Bug fixes for printing, improved error messages.
    • Allow AffExpr to be used in macros; for example, ex = y + z; @addConstraint(m, x + 2*ex <= 3)

    Version 0.1.1 (October 23, 2013)

    • Update for solver specification API changes in MathProgBase.

    Version 0.1.0 (October 3, 2013)

    • Initial public release.
    +end
  • The lowerbound, upperbound, and basename keyword arguments to the @variable macro have been renamed to lower_bound, upper_bound, and base_name, for consistency with JuMP's new style recommendations.

  • We rely on broadcasting syntax to apply accessors to collections of variables, for example, value.(x) instead of getvalue(x) for collections. (Use value(x) when x is a scalar object.)

Added

  • Splatting (like f(x...)) is recognized in restricted settings in nonlinear expressions.

  • Support for deleting constraints and variables.

  • The documentation has been completely rewritten using docstrings and Documenter.

  • Support for modeling mixed conic and quadratic models (for example, conic models with quadratic objectives and bi-linear matrix inequalities).

  • Significantly improved support for modeling new types of constraints and for extending JuMP's macros.

  • Support for providing dual warm starts.

  • Improved support for accessing solver-specific attributes (for example, the irreducible inconsistent subsystem).

  • Explicit control of whether symmetry-enforcing constraints are added to PSD constraints.

  • Support for modeling exponential cones.

  • Significant improvements in internal code quality and testing.

  • Style and naming guidelines.

  • Direct mode and manual mode provide explicit control over when copies of a model are stored or regenerated. See the corresponding documentation.

Regressions

There are known regressions from JuMP 0.18 that will be addressed in a future release (0.19.x or later):

  • Performance regressions in model generation (issue). Please file an issue anyway if you notice a significant performance regression. We have plans to address a number of performance issues, but we might not be aware of all of them.

  • Fast incremental NLP solves are not yet reimplemented (issue).

  • We do not yet have an implementation of solver-specific callbacks.

  • The column generation syntax in @variable has been removed (that is, the objective, coefficients, and inconstraints keyword arguments). Support for column generation will be re-introduced in a future release.

  • The ability to solve the continuous relaxation (that is, via solve(model; relaxation = true)) is not yet reimplemented (issue).

Version 0.18.5 (December 1, 2018)

  • Support views in some derivative evaluation functions.
  • Improved compatibility with PackageCompiler.

Version 0.18.4 (October 8, 2018)

  • Fix a bug in model printing on Julia 0.7 and 1.0.

Version 0.18.3 (October 1, 2018)

  • Add support for Julia v1.0 (Thanks @ExpandingMan)
  • Fix matrix expressions with quadratic functions (#1508)

Version 0.18.2 (June 10, 2018)

  • Fix a bug in second-order derivatives when expressions are present (#1319)
  • Fix a bug in @constraintref (#1330)

Version 0.18.1 (April 9, 2018)

  • Fix for nested tuple destructuring (#1193)
  • Preserve internal model when relaxation=true (#1209)
  • Minor bug fixes and updates for example

Version 0.18.0 (July 27, 2017)

  • Drop support for Julia 0.5.
  • Update for ForwardDiff 0.5.
  • Minor bug fixes.

Version 0.17.1 (June 9, 2017)

  • Use of constructconstraint! in @SDconstraint.
  • Minor bug fixes.

Version 0.17.0 (May 27, 2017)

  • Breaking change: Mixing quadratic and conic constraints is no longer supported.
  • Breaking change: The getvariable and getconstraint functions are replaced by indexing on the corresponding symbol. For instance, to access the variable with name x, one should now write m[:x] instead of getvariable(m, :x). As a consequence, creating a variable and constraint with the same name now triggers a warning, and accessing one of them afterwards throws an error. This change is breaking only in the latter case.
  • Addition of the getobjectivebound function that mirrors the functionality of the MathProgBase getobjbound function except that it takes into account transformations performed by JuMP.
  • Minor bug fixes.

The following changes are primarily of interest to developers of JuMP extensions:

  • The new syntax @constraint(model, expr in Cone) creates the constraint ensuring that expr is inside Cone. The Cone argument is passed to constructconstraint! which enables the call to the dispatched to an extension.
  • The @variable macro now calls constructvariable! instead of directly calling the Variable constructor. Extra arguments and keyword arguments passed to @variable are passed to constructvariable! which enables the call to be dispatched to an extension.
  • Refactor the internal function conicdata (used build the MathProgBase conic model) into smaller sub-functions to make these parts reusable by extensions.

Version 0.16.2 (March 28, 2017)

  • Minor bug fixes and printing tweaks
  • Address deprecation warnings for Julia 0.6

Version 0.16.1 (March 7, 2017)

  • Better support for AbstractArray in JuMP (Thanks @tkoolen)
  • Minor bug fixes

Version 0.16.0 (February 23, 2017)

  • Breaking change: JuMP no longer has a mechanism for selecting solvers by default (the previous mechanism was flawed and incompatible with Julia 0.6). Not specifying a solver before calling solve() will result in an error.
  • Breaking change: User-defined functions are no longer global. The first argument to JuMP.register is now a JuMP Model object within whose scope the function will be registered. Calling JuMP.register without a Model now produces an error.
  • Breaking change: Use the new JuMP.fix method to fix a variable to a value or to update the value to which a variable is fixed. Calling setvalue on a fixed variable now results in an error in order to avoid silent behavior changes. (Thanks @joaquimg)
  • Nonlinear expressions now print out similarly to linear/quadratic expressions (useful for debugging!)
  • New category keyword to @variable. Used for specifying categories of anonymous variables.
  • Compatibility with Julia 0.6-dev.
  • Minor fixes and improvements (Thanks @cossio, @ccoffrin, @blegat)

Version 0.15.1 (January 31, 2017)

  • Bugfix for @LinearConstraints and friends

Version 0.15.0 (December 22, 2016)

  • Julia 0.5.0 is the minimum required version for this release.
  • Document support for BARON solver
  • Enable info callbacks in more states than before, for example, for recording solutions. New when argument to addinfocallback (#814, thanks @yeesian)
  • Improved support for anonymous variables. This includes new warnings for potentially confusing use of the traditional non-anonymous syntax:
    • When multiple variables in a model are given the same name
    • When non-symbols are used as names, for example, @variable(m, x[1][1:N])
  • Improvements in iterating over JuMP containers (#836, thanks @IssamT)
  • Support for writing variable names in .lp file output (Thanks @leethargo)
  • Support for querying duals to SDP problems (Thanks @blegat)
  • The comprehension syntax with curly braces sum{}, prod{}, and norm2{} has been deprecated in favor of Julia's native comprehension syntax sum(), prod() and norm() as previously announced. (For early adopters of the new syntax, norm2() was renamed to norm() without deprecation.)
  • Unit tests rewritten to use Base.Test instead of FactCheck
  • Improved support for operations with matrices of JuMP types (Thanks @ExpandingMan)
  • The syntax to halt a solver from inside a callback has changed from throw(CallbackAbort()) to return JuMP.StopTheSolver
  • Minor bug fixes

Version 0.14.2 (December 12, 2016)

  • Allow singleton anonymous variables (includes bugfix)

Version 0.14.1 (September 12, 2016)

  • More consistent handling of states in informational callbacks, includes a new when parameter to addinfocallback for specifying in which state an informational callback should be called.

Version 0.14.0 (August 7, 2016)

  • Compatibility with Julia 0.5 and ForwardDiff 0.2
  • Support for "anonymous" variables, constraints, expressions, and parameters, for example, x = @variable(m, [1:N]) instead of @variable(m, x[1:N])
  • Support for retrieving constraints from a model by name via getconstraint
  • @NLconstraint now returns constraint references (as expected).
  • Support for vectorized expressions within lazy constraints
  • On Julia 0.5, parse new comprehension syntax sum(x[i] for i in 1:N if isodd(i)) instead of sum{ x[i], i in 1:N; isodd(i) }. The old syntax with curly braces will be deprecated in JuMP 0.15.
  • Now possible to provide nonlinear expressions as "raw" Julia Expr objects instead of using JuMP's nonlinear macros. This input format is useful for programmatically generated expressions.
  • s/Mathematical Programming/Mathematical Optimization/
  • Support for local cuts (Thanks to @madanim, Mehdi Madani)
  • Document Xpress interface developed by @joaquimg, Joaquim Dias Garcia
  • Minor bug and deprecation fixes (Thanks @odow, @jrevels)

Version 0.13.2 (May 16, 2016)

  • Compatibility update for MathProgBase

Version 0.13.1 (May 3, 2016)

  • Fix broken deprecation for registerNLfunction.

Version 0.13.0 (April 29, 2016)

  • Most exported methods and macros have been renamed to avoid camelCase. See the list of changes here. There is a 1-1 mapping from the old names to the new, and it is safe to simply replace the names to update existing models.
  • Specify variable lower/upper bounds in @variable using the lowerbound and upperbound keyword arguments.
  • Change name printed for variable using the basename keyword argument to @variable.
  • New @variables macro allows multi-line declaration of groups of variables.
  • A number of solver methods previously available only through MathProgBase are now exposed directly in JuMP. The fix was recorded live.
  • Compatibility fixes with Julia 0.5.
  • The "end" indexing syntax is no longer supported within JuMPArrays which do not use 1-based indexing until upstream issues are resolved, see here.

Version 0.12.2 (March 9, 2016)

  • Small fixes for nonlinear optimization

Version 0.12.1 (March 1, 2016)

  • Fix a regression in slicing for JuMPArrays (when not using 1-based indexing)

Version 0.12.0 (February 27, 2016)

  • The automatic differentiation functionality has been completely rewritten with a number of user-facing changes:
    • @defExpr and @defNLExpr now take the model as the first argument. The previous one-argument version of @defExpr is deprecated; all expressions should be named. For example, replace @defExpr(2x+y) with @defExpr(jump_model, my_expr, 2x+y).
    • JuMP no longer uses Julia's variable binding rules for efficiently re-solving a sequence of nonlinear models. Instead, we have introduced nonlinear parameters. This is a breaking change, so we have added a warning message when we detect models that may depend on the old behavior.
    • Support for user-defined functions integrated within nonlinear JuMP expressions.
  • Replaced iteration over AffExpr with Number-like scalar iteration; previous iteration behavior is now available via linearterms(::AffExpr).
  • Stopping the solver via throw(CallbackAbort()) from a callback no longer triggers an exception. Instead, solve() returns UserLimit status.
  • getDual() now works for conic problems (Thanks @emreyamangil.)

Version 0.11.3 (February 4, 2016)

  • Bug-fix for problems with quadratic objectives and semidefinite constraints

Version 0.11.2 (January 14, 2016)

  • Compatibility update for Mosek

Version 0.11.1 (December 1, 2015)

  • Remove usage of @compat in tests.
  • Fix updating quadratic objectives for nonlinear models.

Version 0.11.0 (November 30, 2015)

  • Julia 0.4.0 is the minimum required version for this release.
  • Fix for scoping semantics of index variables in sum{}. Index variables no longer leak into the surrounding scope.
  • Addition of the solve(m::Model, relaxation=true) keyword argument to solve the standard continuous relaxation of model m
  • The getConstraintBounds() method allows access to the lower and upper bounds of all constraints in a (nonlinear) model.
  • Update for breaking changes in MathProgBase

Version 0.10.3 (November 20, 2015)

  • Fix a rare error when parsing quadratic expressions
  • Fix Variable() constructor with default arguments
  • Detect unrecognized keywords in solve()

Version 0.10.2 (September 28, 2015)

  • Fix for deprecation warnings

Version 0.10.1 (September 3, 2015)

  • Fixes for ambiguity warnings.
  • Fix for breaking change in precompilation syntax in Julia 0.4-pre

Version 0.10.0 (August 31, 2015)

  • Support (on Julia 0.4 and later) for conditions in indexing @defVar and @addConstraint constructs, for example, @defVar(m, x[i=1:5,j=1:5; i+j >= 3])
  • Support for vectorized operations on Variables and expressions. See the documentation for details.
  • New getVar() method to access variables in a model by name
  • Support for semidefinite programming.
  • Dual solutions are now available for general nonlinear problems. You may call getDual on a reference object for a nonlinear constraint, and getDual on a variable object for Lagrange multipliers from active bounds.
  • Introduce warnings for two common performance traps: too many calls to getValue() on a collection of variables and use of the + operator in a loop to sum expressions.
  • Second-order cone constraints can be written directly with the norm() and norm2{} syntax.
  • Implement MathProgBase interface for querying Hessian-vector products.
  • Iteration over JuMPContainers is deprecated; instead, use the keys and values functions, and zip(keys(d),values(d)) for the old behavior.
  • @defVar returns Array{Variable,N} when each of N index sets are of the form 1:nᵢ.
  • Module precompilation: on Julia 0.4 and later, using JuMP is now much faster.

Version 0.9.3 (August 11, 2015)

  • Fixes for FactCheck testing on julia v0.4.

Version 0.9.2 (June 27, 2015)

  • Fix bug in @addConstraints.

Version 0.9.1 (April 25, 2015)

  • Fix for Julia 0.4-dev.
  • Small infrastructure improvements for extensions.

Version 0.9.0 (April 18, 2015)

  • Comparison operators for constructing constraints (for example, 2x >= 1) have been deprecated. Instead, construct the constraints explicitly in the @addConstraint macro to add them to the model, or in the @LinearConstraint macro to create a stand-alone linear constraint instance.
  • getValue() method implemented to compute the value of a nonlinear subexpression
  • JuMP is now released under the Mozilla Public License version 2.0 (was previously LGPL). MPL is a copyleft license which is less restrictive than LGPL, especially for embedding JuMP within other applications.
  • A number of performance improvements in ReverseDiffSparse for computing derivatives.
  • MathProgBase.getsolvetime(m) now returns the solution time reported by the solver, if available. (Thanks @odow, Oscar Dowson)
  • Formatting fix for LP format output. (Thanks @sbebo, Leonardo Taccari).

Version 0.8.0 (February 17, 2015)

  • Nonlinear subexpressions now supported with the @defNLExpr macro.
  • SCS supported for solving second-order conic problems.
  • setXXXCallback family deprecated in favor of addXXXCallback.
  • Multiple callbacks of the same type can be registered.
  • Added support for informational callbacks via addInfoCallback.
  • A CallbackAbort exception can be thrown from callback to safely exit optimization.

Version 0.7.4 (February 4, 2015)

  • Reduced costs and linear constraint duals are now accessible when quadratic constraints are present.
  • Two-sided nonlinear constraints are supported.
  • Methods for accessing the number of variables and constraints in a model are renamed.
  • New default procedure for setting initial values in nonlinear optimization: project zero onto the variable bounds.
  • Small bug fixes.

Version 0.7.3 (January 14, 2015)

  • Fix a method ambiguity conflict with Compose.jl (cosmetic fix)

Version 0.7.2 (January 9, 2015)

  • Fix a bug in sum(::JuMPDict)
  • Added the setCategory function to change a variables category (for example, continuous or binary)

after construction, and getCategory to retrieve the variable category.

Version 0.7.1 (January 2, 2015)

  • Fix a bug in parsing linear expressions in macros. Affects only Julia 0.4 and later.

Version 0.7.0 (December 29, 2014)

Linear/quadratic/conic programming

  • Breaking change: The syntax for column-wise model generation has been changed to use keyword arguments in @defVar.
  • On Julia 0.4 and later, variables and coefficients may be multiplied in any order within macros. That is, variable*coefficient is now valid syntax.
  • ECOS supported for solving second-order conic problems.

Nonlinear programming

  • Support for skipping model generation when solving a sequence of nonlinear models with changing data.
  • Fix a memory leak when solving a sequence of nonlinear models.
  • The @addNLConstraint macro now supports the three-argument version to define sets of nonlinear constraints.
  • KNITRO supported as a nonlinear solver.
  • Speed improvements for model generation.
  • The @addNLConstraints macro supports adding multiple (groups of) constraints at once. Syntax is similar to @addConstraints.
  • Discrete variables allowed in nonlinear problems for solvers which support them (currently only KNITRO).

General

  • Starting values for variables may now be specified with @defVar(m, x, start=value).
  • The setSolver function allows users to change the solver subsequent to model creation.
  • Support for "fixed" variables via the @defVar(m, x == 1) syntax.
  • Unit tests rewritten to use FactCheck.jl, improved testing across solvers.

Version 0.6.3 (October 19, 2014)

  • Fix a bug in multiplying two AffExpr objects.

Version 0.6.2 (October 11, 2014)

  • Further improvements and bug fixes for printing.
  • Fixed a bug in @defExpr.
  • Support for accessing expression graphs through the MathProgBase NLP interface.

Version 0.6.1 (September 19, 2014)

  • Improvements and bug fixes for printing.

Version 0.6.0 (September 9, 2014)

  • Julia 0.3.0 is the minimum required version for this release.
  • buildInternalModel(m::Model) added to build solver-level model in memory without optimizing.
  • Deprecate load_model_only keyword argument to solve.
  • Add groups of constraints with @addConstraints macro.
  • Unicode operators now supported, including for sum, for prod, and /
  • Quadratic constraints supported in @addConstraint macro.
  • Quadratic objectives supported in @setObjective macro.
  • MathProgBase solver-independent interface replaces Ipopt-specific interface for nonlinear problems
    • Breaking change: IpoptOptions no longer supported to specify solver options, use m = Model(solver=IpoptSolver(options...)) instead.
  • New solver interfaces: ECOS, NLopt, and nonlinear support for MOSEK
  • New option to control whether the lazy constraint callback is executed at each node in the B&B tree or just when feasible solutions are found
  • Add support for semicontinuous and semi-integer variables for those solvers that support them.
  • Add support for index dependencies (for example, triangular indexing) in @defVar, @addConstraint, and @defExpr (for example, @defVar(m, x[i=1:10,j=i:10])).
    • This required some changes to the internal structure of JuMP containers, which may break code that explicitly stored JuMPDict objects.

Version 0.5.8 (September 24, 2014)

  • Fix a bug with specifying solvers (affects Julia 0.2 only)

Version 0.5.7 (September 5, 2014)

  • Fix a bug in printing models

Version 0.5.6 (September 2, 2014)

  • Add support for semicontinuous and semi-integer variables for those solvers that support them.
    • Breaking change: Syntax for Variable() constructor has changed (use of this interface remains discouraged)
  • Update for breaking changes in MathProgBase

Version 0.5.5 (July 6, 2014)

  • Fix bug with problem modification: adding variables that did not appear in existing constraints or objective.

Version 0.5.4 (June 19, 2014)

  • Update for breaking change in MathProgBase which reduces loading times for using JuMP
  • Fix error when MIPs not solved to optimality

Version 0.5.3 (May 21, 2014)

  • Update for breaking change in ReverseDiffSparse

Version 0.5.2 (May 9, 2014)

  • Fix compatibility with Julia 0.3 prerelease

Version 0.5.1 (May 5, 2014)

  • Fix a bug in coefficient handling inside lazy constraints and user cuts

Version 0.5.0 (May 2, 2014)

  • Support for nonlinear optimization with exact, sparse second-order derivatives automatically computed. Ipopt is currently the only solver supported.
  • getValue for AffExpr and QuadExpr
  • Breaking change: getSolverModel replaced by getInternalModel, which returns the internal MathProgBase-level model
  • Groups of constraints can be specified with @addConstraint (see documentation for details). This is not a breaking change.
  • dot(::JuMPDict{Variable},::JuMPDict{Variable}) now returns the corresponding quadratic expression.

Version 0.4.1 (March 24, 2014)

  • Fix a bug where a change in objective sense was ignored when re-solving a model.
  • Fix issue with handling zero coefficients in AffExpr.

Version 0.4.0 (March 10, 2014)

  • Support for SOS1 and SOS2 constraints.
  • Solver-independent callback for user heuristics.
  • dot and sum implemented for JuMPDict objects. Now you can say @addConstraint(m, dot(a,x) <= b).
  • Developers: support for extensions to JuMP. See definition of Model in src/JuMP.jl for more details.
  • Option to construct the low-level model before optimizing.

Version 0.3.2 (February 17, 2014)

  • Improved model printing.
    • Preliminary support for IJulia output.

Version 0.3.1 (January 30, 2014)

  • Documentation updates.
  • Support for MOSEK.
  • CPLEXLink renamed to CPLEX.

Version 0.3.0 (January 21, 2014)

  • Unbounded/infeasibility rays: getValue() will return the corresponding components of an unbounded ray when a model is unbounded, if supported by the selected solver. getDual() will return an infeasibility ray (Farkas proof) if a model is infeasible and the selected solver supports this feature.
  • Solver-independent callbacks for user generated cuts.
  • Use new interface for solver-independent QCQP.
  • setlazycallback renamed to setLazyCallback for consistency.

Version 0.2.0 (December 15, 2013)

Breaking

  • Objective sense is specified in setObjective instead of in the Model constructor.
  • lpsolver and mipsolver merged into a single solver option.

Added

  • Problem modification with efficient LP restarts and MIP warm-starts.
  • Relatedly, column-wise modeling is now supported.
  • Solver-independent callbacks supported. Currently we support only a "lazy constraint" callback, which works with Gurobi, CPLEX, and GLPK. More callbacks coming soon.

Version 0.1.2 (November 16, 2013)

  • Bug fixes for printing and improved error messages.
  • Allow AffExpr to be used in macros; for example, ex = y + z; @addConstraint(m, x + 2*ex <= 3).

Version 0.1.1 (October 23, 2013)

  • Update for solver specification API changes in MathProgBase.

Version 0.1.0 (October 3, 2013)

  • Initial public release.
diff --git a/dev/search_index.js b/dev/search_index.js index feae81f6748..998265bc099 100644 --- a/dev/search_index.js +++ b/dev/search_index.js @@ -1,3 +1,3 @@ var documenterSearchIndex = {"docs": -[{"location":"moi/reference/nonlinear/","page":"Nonlinear programming","title":"Nonlinear programming","text":"EditURL = \"https://github.com/jump-dev/MathOptInterface.jl/blob/v1.34.0/docs/src/reference/nonlinear.md\"","category":"page"},{"location":"moi/reference/nonlinear/","page":"Nonlinear programming","title":"Nonlinear programming","text":"CurrentModule = MathOptInterface\nDocTestSetup = quote\n import MathOptInterface as MOI\nend\nDocTestFilters = [r\"MathOptInterface|MOI\"]","category":"page"},{"location":"moi/reference/nonlinear/#Nonlinear-programming","page":"Nonlinear programming","title":"Nonlinear programming","text":"","category":"section"},{"location":"moi/reference/nonlinear/#Types","page":"Nonlinear programming","title":"Types","text":"","category":"section"},{"location":"moi/reference/nonlinear/","page":"Nonlinear programming","title":"Nonlinear programming","text":"AbstractNLPEvaluator\nNLPBoundsPair\nNLPBlockData","category":"page"},{"location":"moi/reference/nonlinear/#MathOptInterface.AbstractNLPEvaluator","page":"Nonlinear programming","title":"MathOptInterface.AbstractNLPEvaluator","text":"AbstractNLPEvaluator\n\nAbstract supertype for the callback object that is used to query function values, derivatives, and expression graphs.\n\nIt is used in NLPBlockData.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> supertype(typeof(evaluator))\nMathOptInterface.AbstractNLPEvaluator\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/nonlinear/#MathOptInterface.NLPBoundsPair","page":"Nonlinear programming","title":"MathOptInterface.NLPBoundsPair","text":"NLPBoundsPair(lower::Float64, upper::Float64)\n\nA struct holding a pair of lower and upper bounds.\n\n-Inf and Inf can be used to indicate no lower or upper bound, respectively.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> bounds = MOI.NLPBoundsPair.([25.0, 40.0], [Inf, 40.0])\n2-element Vector{MathOptInterface.NLPBoundsPair}:\n MathOptInterface.NLPBoundsPair(25.0, Inf)\n MathOptInterface.NLPBoundsPair(40.0, 40.0)\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/nonlinear/#MathOptInterface.NLPBlockData","page":"Nonlinear programming","title":"MathOptInterface.NLPBlockData","text":"struct NLPBlockData\n constraint_bounds::Vector{NLPBoundsPair}\n evaluator::AbstractNLPEvaluator\n has_objective::Bool\nend\n\nA struct encoding a set of nonlinear constraints of the form lb le g(x) le ub and, if has_objective == true, a nonlinear objective function f(x).\n\nNonlinear objectives override any objective set by using the ObjectiveFunction attribute.\n\nThe evaluator is a callback object that is used to query function values, derivatives, and expression graphs. If has_objective == false, then it is an error to query properties of the objective function, and in Hessian-of-the-Lagrangian queries, σ must be set to zero.\n\nnote: Note\nThroughout the evaluator, all variables are ordered according to ListOfVariableIndices. 
Hence, MOI copies of nonlinear problems must not re-order variables.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> model = MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}());\n\njulia> block = MOI.NLPBlockData(\n MOI.NLPBoundsPair.([25.0, 40.0], [Inf, 40.0]),\n MOI.Test.HS071(true),\n true,\n );\n\njulia> MOI.set(model, MOI.NLPBlock(), block)\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/nonlinear/#Attributes","page":"Nonlinear programming","title":"Attributes","text":"","category":"section"},{"location":"moi/reference/nonlinear/","page":"Nonlinear programming","title":"Nonlinear programming","text":"NLPBlock\nNLPBlockDual\nNLPBlockDualStart","category":"page"},{"location":"moi/reference/nonlinear/#MathOptInterface.NLPBlock","page":"Nonlinear programming","title":"MathOptInterface.NLPBlock","text":"NLPBlock()\n\nAn AbstractModelAttribute that stores an NLPBlockData, representing a set of nonlinear constraints, and optionally a nonlinear objective.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> model = MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}());\n\njulia> block = MOI.NLPBlockData(\n MOI.NLPBoundsPair.([25.0, 40.0], [Inf, 40.0]),\n MOI.Test.HS071(true),\n true,\n );\n\njulia> MOI.set(model, MOI.NLPBlock(), block)\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/nonlinear/#MathOptInterface.NLPBlockDual","page":"Nonlinear programming","title":"MathOptInterface.NLPBlockDual","text":"NLPBlockDual(result_index::Int = 1)\n\nAn AbstractModelAttribute for the Lagrange multipliers on the constraints from the NLPBlock in result result_index.\n\nIf result_index is omitted, it is 1 by default.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> MOI.NLPBlockDual()\nMathOptInterface.NLPBlockDual(1)\n\njulia> MOI.NLPBlockDual(2)\nMathOptInterface.NLPBlockDual(2)\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/nonlinear/#MathOptInterface.NLPBlockDualStart","page":"Nonlinear programming","title":"MathOptInterface.NLPBlockDualStart","text":"NLPBlockDualStart()\n\nAn AbstractModelAttribute for the initial assignment of the Lagrange multipliers on the constraints from the NLPBlock that the solver may use to warm-start the solve.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> model = MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}());\n\njulia> block = MOI.NLPBlockData(\n MOI.NLPBoundsPair.([25.0, 40.0], [Inf, 40.0]),\n MOI.Test.HS071(true),\n true,\n );\n\njulia> MOI.set(model, MOI.NLPBlock(), block)\n\njulia> MOI.set(model, MOI.NLPBlockDualStart(), [1.0, 2.0])\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/nonlinear/#Functions","page":"Nonlinear programming","title":"Functions","text":"","category":"section"},{"location":"moi/reference/nonlinear/","page":"Nonlinear programming","title":"Nonlinear 
programming","text":"initialize\nfeatures_available\neval_objective\neval_constraint\neval_objective_gradient\njacobian_structure\neval_constraint_gradient\nconstraint_gradient_structure\neval_constraint_jacobian\neval_constraint_jacobian_product\neval_constraint_jacobian_transpose_product\nhessian_lagrangian_structure\nhessian_objective_structure\nhessian_constraint_structure\neval_hessian_objective\neval_hessian_constraint\neval_hessian_lagrangian\neval_hessian_lagrangian_product\nobjective_expr\nconstraint_expr","category":"page"},{"location":"moi/reference/nonlinear/#MathOptInterface.initialize","page":"Nonlinear programming","title":"MathOptInterface.initialize","text":"initialize(\n d::AbstractNLPEvaluator,\n requested_features::Vector{Symbol},\n)::Nothing\n\nInitialize d with the set of features in requested_features. Check features_available before calling initialize to see what features are supported by d.\n\nwarning: Warning\nThis method must be called before any other methods.\n\nFeatures\n\nThe following features are defined:\n\n:Grad: enables eval_objective_gradient\n:Jac: enables eval_constraint_jacobian and eval_constraint_gradient\n:JacVec: enables eval_constraint_jacobian_product and eval_constraint_jacobian_transpose_product\n:Hess: enables eval_hessian_lagrangian\n:HessVec: enables eval_hessian_lagrangian_product\n:ExprGraph: enables objective_expr and constraint_expr.\n\nIn all cases, including when requested_features is empty, eval_objective and eval_constraint are supported.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, [:Grad, :Jac])\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.features_available","page":"Nonlinear programming","title":"MathOptInterface.features_available","text":"features_available(d::AbstractNLPEvaluator)::Vector{Symbol}\n\nReturns the subset of features available for this problem instance.\n\nSee initialize for the list of defined features.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true, true);\n\njulia> MOI.features_available(evaluator)\n6-element Vector{Symbol}:\n :Grad\n :Jac\n :JacVec\n :ExprGraph\n :Hess\n :HessVec\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.eval_objective","page":"Nonlinear programming","title":"MathOptInterface.eval_objective","text":"eval_objective(d::AbstractNLPEvaluator, x::AbstractVector{T})::T where {T}\n\nEvaluate the objective f(x), returning a scalar value.\n\nInitialize\n\nBefore calling this function, you must call initialize, but you do not need to pass a value.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, Symbol[])\n\njulia> MOI.eval_objective(evaluator, [1.0, 2.0, 3.0, 4.0])\n27.0\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.eval_constraint","page":"Nonlinear programming","title":"MathOptInterface.eval_constraint","text":"eval_constraint(\n d::AbstractNLPEvaluator,\n g::AbstractVector{T},\n x::AbstractVector{T},\n)::Nothing where {T}\n\nGiven a set of vector-valued constraints l le g(x) le u, evaluate the constraint function g(x), storing the result in the vector g.\n\nInitialize\n\nBefore calling this function, you must 
call initialize, but you do not need to pass a value.\n\nImplementation notes\n\nWhen implementing this method, you must not assume that g is Vector{Float64}, but you may assume that it supports setindex! and length. For example, it may be the view of a vector.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, Symbol[])\n\njulia> g = fill(NaN, 2);\n\njulia> MOI.eval_constraint(evaluator, g, [1.0, 2.0, 3.0, 4.0])\n\njulia> g\n2-element Vector{Float64}:\n 24.0\n 30.0\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.eval_objective_gradient","page":"Nonlinear programming","title":"MathOptInterface.eval_objective_gradient","text":"eval_objective_gradient(\n d::AbstractNLPEvaluator,\n grad::AbstractVector{T},\n x::AbstractVector{T},\n)::Nothing where {T}\n\nEvaluate the gradient of the objective function grad = nabla f(x) as a dense vector, storing the result in the vector grad.\n\nInitialize\n\nBefore calling this function, you must call initialize with :Grad.\n\nImplementation notes\n\nWhen implementing this method, you must not assume that grad is Vector{Float64}, but you may assume that it supports setindex! and length. For example, it may be the view of a vector.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, Symbol[:Grad])\n\njulia> grad = fill(NaN, 4);\n\njulia> MOI.eval_objective_gradient(evaluator, grad, [1.0, 2.0, 3.0, 4.0])\n\njulia> grad\n4-element Vector{Float64}:\n 28.0\n 4.0\n 5.0\n 6.0\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.jacobian_structure","page":"Nonlinear programming","title":"MathOptInterface.jacobian_structure","text":"jacobian_structure(d::AbstractNLPEvaluator)::Vector{Tuple{Int64,Int64}}\n\nReturns a vector of tuples, (row, column), where each indicates the position of a structurally nonzero element in the Jacobian matrix: J_g(x) = left beginarrayc nabla g_1(x) nabla g_2(x) vdots nabla g_m(x) endarrayright where g_i is the itextth component of the nonlinear constraints g(x).\n\nThe indices are not required to be sorted and can contain duplicates, in which case the solver should combine the corresponding elements by adding them together.\n\nThe sparsity structure is assumed to be independent of the point x.\n\nInitialize\n\nBefore calling this function, you must call initialize with :Jac.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, Symbol[:Jac])\n\njulia> MOI.jacobian_structure(evaluator)\n8-element Vector{Tuple{Int64, Int64}}:\n (1, 1)\n (1, 2)\n (1, 3)\n (1, 4)\n (2, 1)\n (2, 2)\n (2, 3)\n (2, 4)\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.eval_constraint_gradient","page":"Nonlinear programming","title":"MathOptInterface.eval_constraint_gradient","text":"eval_constraint_gradient(\n d::AbstractNLPEvaluator,\n ∇g::AbstractVector{T},\n x::AbstractVector{T},\n i::Int,\n)::Nothing where {T}\n\nEvaluate the gradient of constraint i, nabla g_i(x), and store the non-zero values in ∇g, corresponding to the structure returned by constraint_gradient_structure.\n\nImplementation notes\n\nWhen implementing this method, you must not assume that ∇g is 
Vector{Float64}, but you may assume that it supports setindex! and length. For example, it may be the view of a vector.\n\nInitialize\n\nBefore calling this function, you must call initialize with :Jac.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, Symbol[:Jac])\n\njulia> indices = MOI.constraint_gradient_structure(evaluator, 1);\n\njulia> ∇g = zeros(length(indices));\n\njulia> MOI.eval_constraint_gradient(evaluator, ∇g, [1.0, 2.0, 3.0, 4.0], 1)\n\njulia> ∇g\n4-element Vector{Float64}:\n 24.0\n 12.0\n 8.0\n 6.0\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.constraint_gradient_structure","page":"Nonlinear programming","title":"MathOptInterface.constraint_gradient_structure","text":"constraint_gradient_structure(d::AbstractNLPEvaluator, i::Int)::Vector{Int64}\n\nReturns a vector of indices, where each element indicates the position of a structurally nonzero element in the gradient of constraint nabla g_i(x).\n\nThe indices are not required to be sorted and can contain duplicates, in which case the solver should combine the corresponding elements by adding them together.\n\nThe sparsity structure is assumed to be independent of the point x.\n\nInitialize\n\nBefore calling this function, you must call initialize with :Jac.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, Symbol[:Jac])\n\njulia> indices = MOI.constraint_gradient_structure(evaluator, 1)\n4-element Vector{Int64}:\n 1\n 2\n 3\n 4\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.eval_constraint_jacobian","page":"Nonlinear programming","title":"MathOptInterface.eval_constraint_jacobian","text":"eval_constraint_jacobian(\n d::AbstractNLPEvaluator,\n J::AbstractVector{T},\n x::AbstractVector{T},\n)::Nothing where {T}\n\nEvaluates the sparse Jacobian matrix J_g(x) = left beginarrayc nabla g_1(x) nabla g_2(x) vdots nabla g_m(x) endarrayright.\n\nThe result is stored in the vector J in the same order as the indices returned by jacobian_structure.\n\nImplementation notes\n\nWhen implementing this method, you must not assume that J is Vector{Float64}, but you may assume that it supports setindex! and length. 
For example, it may be the view of a vector.\n\nInitialize\n\nBefore calling this function, you must call initialize with :Hess.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, Symbol[:Jac])\n\njulia> J_indices = MOI.jacobian_structure(evaluator);\n\njulia> J = zeros(length(J_indices));\n\njulia> MOI.eval_constraint_jacobian(evaluator, J, [1.0, 2.0, 3.0, 4.0])\n\njulia> J\n8-element Vector{Float64}:\n 24.0\n 12.0\n 8.0\n 6.0\n 2.0\n 4.0\n 6.0\n 8.0\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.eval_constraint_jacobian_product","page":"Nonlinear programming","title":"MathOptInterface.eval_constraint_jacobian_product","text":"eval_constraint_jacobian_product(\n d::AbstractNLPEvaluator,\n y::AbstractVector{T},\n x::AbstractVector{T},\n w::AbstractVector{T},\n)::Nothing where {T}\n\nComputes the Jacobian-vector product y = J_g(x)w, storing the result in the vector y.\n\nThe vectors have dimensions such that length(w) == length(x), and length(y) is the number of nonlinear constraints.\n\nImplementation notes\n\nWhen implementing this method, you must not assume that y is Vector{Float64}, but you may assume that it supports setindex! and length. For example, it may be the view of a vector.\n\nInitialize\n\nBefore calling this function, you must call initialize with :JacVec.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, Symbol[:Jac, :JacVec])\n\njulia> y = zeros(2);\n\njulia> x = [1.0, 2.0, 3.0, 4.0];\n\njulia> w = [1.5, 2.5, 3.5, 4.5];\n\njulia> MOI.eval_constraint_jacobian_product(evaluator, y, x, w)\n\njulia> y\n2-element Vector{Float64}:\n 121.0\n 70.0\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.eval_constraint_jacobian_transpose_product","page":"Nonlinear programming","title":"MathOptInterface.eval_constraint_jacobian_transpose_product","text":"eval_constraint_jacobian_transpose_product(\n d::AbstractNLPEvaluator,\n y::AbstractVector{T},\n x::AbstractVector{T},\n w::AbstractVector{T},\n)::Nothing where {T}\n\nComputes the Jacobian-transpose-vector product y = J_g(x)^Tw, storing the result in the vector y.\n\nThe vectors have dimensions such that length(y) == length(x), and length(w) is the number of nonlinear constraints.\n\nImplementation notes\n\nWhen implementing this method, you must not assume that y is Vector{Float64}, but you may assume that it supports setindex! and length. 
For example, it may be the view of a vector.\n\nInitialize\n\nBefore calling this function, you must call initialize with :JacVec.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, Symbol[:Jac, :JacVec])\n\njulia> y = zeros(4);\n\njulia> x = [1.0, 2.0, 3.0, 4.0];\n\njulia> w = [1.5, 2.5];\n\njulia> MOI.eval_constraint_jacobian_transpose_product(evaluator, y, x, w)\n\njulia> y\n4-element Vector{Float64}:\n 41.0\n 28.0\n 27.0\n 29.0\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.hessian_lagrangian_structure","page":"Nonlinear programming","title":"MathOptInterface.hessian_lagrangian_structure","text":"hessian_lagrangian_structure(\n d::AbstractNLPEvaluator,\n)::Vector{Tuple{Int64,Int64}}\n\nReturns a vector of tuples, (row, column), where each indicates the position of a structurally nonzero element in the Hessian-of-the-Lagrangian matrix: nabla^2 f(x) + sum_i=1^m nabla^2 g_i(x).\n\nThe indices are not required to be sorted and can contain duplicates, in which case the solver should combine the corresponding elements by adding them together.\n\nAny mix of lower and upper-triangular indices is valid. Elements (i, j) and (j, i), if both present, should be treated as duplicates.\n\nThe sparsity structure is assumed to be independent of the point x.\n\nInitialize\n\nBefore calling this function, you must call initialize with :Hess.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, Symbol[:Hess])\n\njulia> MOI.hessian_lagrangian_structure(evaluator)\n10-element Vector{Tuple{Int64, Int64}}:\n (1, 1)\n (2, 1)\n (2, 2)\n (3, 1)\n (3, 2)\n (3, 3)\n (4, 1)\n (4, 2)\n (4, 3)\n (4, 4)\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.hessian_objective_structure","page":"Nonlinear programming","title":"MathOptInterface.hessian_objective_structure","text":"hessian_objective_structure(\n d::AbstractNLPEvaluator,\n)::Vector{Tuple{Int64,Int64}}\n\nReturns a vector of tuples, (row, column), where each indicates the position of a structurally nonzero element in the Hessian matrix: nabla^2 f(x).\n\nThe indices are not required to be sorted and can contain duplicates, in which case the solver should combine the corresponding elements by adding them together.\n\nAny mix of lower and upper-triangular indices is valid. 
Elements (i, j) and (j, i), if both present, should be treated as duplicates.\n\nThe sparsity structure is assumed to be independent of the point x.\n\nInitialize\n\nBefore calling this function, you must call initialize with :Hess.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, Symbol[:Hess])\n\njulia> MOI.hessian_objective_structure(evaluator)\n6-element Vector{Tuple{Int64, Int64}}:\n (1, 1)\n (2, 1)\n (3, 1)\n (4, 1)\n (4, 2)\n (4, 3)\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.hessian_constraint_structure","page":"Nonlinear programming","title":"MathOptInterface.hessian_constraint_structure","text":"hessian_constraint_structure(\n d::AbstractNLPEvaluator,\n i::Int64,\n)::Vector{Tuple{Int64,Int64}}\n\nReturns a vector of tuples, (row, column), where each indicates the position of a structurally nonzero element in the Hessian matrix: nabla^2 g_i(x).\n\nThe indices are not required to be sorted and can contain duplicates, in which case the solver should combine the corresponding elements by adding them together.\n\nAny mix of lower and upper-triangular indices is valid. Elements (i, j) and (j, i), if both present, should be treated as duplicates.\n\nThe sparsity structure is assumed to be independent of the point x.\n\nInitialize\n\nBefore calling this function, you must call initialize with :Hess.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, Symbol[:Hess])\n\njulia> MOI.hessian_constraint_structure(evaluator, 1)\n6-element Vector{Tuple{Int64, Int64}}:\n (2, 1)\n (3, 1)\n (3, 2)\n (4, 1)\n (4, 2)\n (4, 3)\n\njulia> MOI.hessian_constraint_structure(evaluator, 2)\n4-element Vector{Tuple{Int64, Int64}}:\n (1, 1)\n (2, 2)\n (3, 3)\n (4, 4)\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.eval_hessian_objective","page":"Nonlinear programming","title":"MathOptInterface.eval_hessian_objective","text":"eval_hessian_objective(\n d::AbstractNLPEvaluator,\n H::AbstractVector{T},\n x::AbstractVector{T},\n)::Nothing where {T}\n\nThis function computes the sparse Hessian matrix: nabla^2 f(x), storing the result in the vector H in the same order as the indices returned by hessian_objective_structure.\n\nImplementation notes\n\nWhen implementing this method, you must not assume that H is Vector{Float64}, but you may assume that it supports setindex! and length. 
For example, it may be the view of a vector.\n\nInitialize\n\nBefore calling this function, you must call initialize with :Hess.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true, true);\n\njulia> MOI.initialize(evaluator, Symbol[:Hess])\n\njulia> indices = MOI.hessian_objective_structure(evaluator);\n\njulia> H = zeros(length(indices));\n\njulia> x = [1.0, 2.0, 3.0, 4.0];\n\njulia> MOI.eval_hessian_objective(evaluator, H, x)\n\njulia> H\n6-element Vector{Float64}:\n 8.0\n 4.0\n 4.0\n 7.0\n 1.0\n 1.0\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.eval_hessian_constraint","page":"Nonlinear programming","title":"MathOptInterface.eval_hessian_constraint","text":"eval_hessian_constraint(\n d::AbstractNLPEvaluator,\n H::AbstractVector{T},\n x::AbstractVector{T},\n i::Int64,\n)::Nothing where {T}\n\nThis function computes the sparse Hessian matrix: nabla^2 g_i(x), storing the result in the vector H in the same order as the indices returned by hessian_constraint_structure.\n\nImplementation notes\n\nWhen implementing this method, you must not assume that H is Vector{Float64}, but you may assume that it supports setindex! and length. For example, it may be the view of a vector.\n\nInitialize\n\nBefore calling this function, you must call initialize with :Hess.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true, true);\n\njulia> MOI.initialize(evaluator, Symbol[:Hess])\n\njulia> indices = MOI.hessian_constraint_structure(evaluator, 1);\n\njulia> H = zeros(length(indices));\n\njulia> x = [1.0, 2.0, 3.0, 4.0];\n\njulia> MOI.eval_hessian_constraint(evaluator, H, x, 1)\n\njulia> H\n6-element Vector{Float64}:\n 12.0\n 8.0\n 4.0\n 6.0\n 3.0\n 2.0\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.eval_hessian_lagrangian","page":"Nonlinear programming","title":"MathOptInterface.eval_hessian_lagrangian","text":"eval_hessian_lagrangian(\n d::AbstractNLPEvaluator,\n H::AbstractVector{T},\n x::AbstractVector{T},\n σ::T,\n μ::AbstractVector{T},\n)::Nothing where {T}\n\nGiven scalar weight σ and vector of constraint weights μ, this function computes the sparse Hessian-of-the-Lagrangian matrix: sigmanabla^2 f(x) + sum_i=1^m mu_i nabla^2 g_i(x), storing the result in the vector H in the same order as the indices returned by hessian_lagrangian_structure.\n\nImplementation notes\n\nWhen implementing this method, you must not assume that H is Vector{Float64}, but you may assume that it supports setindex! and length. 
For example, it may be the view of a vector.\n\nInitialize\n\nBefore calling this function, you must call initialize with :Hess.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, Symbol[:Hess])\n\njulia> indices = MOI.hessian_lagrangian_structure(evaluator);\n\njulia> H = zeros(length(indices));\n\njulia> x = [1.0, 2.0, 3.0, 4.0];\n\njulia> σ = 1.0;\n\njulia> μ = [1.0, 1.0];\n\njulia> MOI.eval_hessian_lagrangian(evaluator, H, x, σ, μ)\n\njulia> H\n10-element Vector{Float64}:\n 10.0\n 16.0\n 2.0\n 12.0\n 4.0\n 2.0\n 13.0\n 4.0\n 3.0\n 2.0\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.eval_hessian_lagrangian_product","page":"Nonlinear programming","title":"MathOptInterface.eval_hessian_lagrangian_product","text":"eval_hessian_lagrangian_product(\n d::AbstractNLPEvaluator,\n h::AbstractVector{T},\n x::AbstractVector{T},\n v::AbstractVector{T},\n σ::T,\n μ::AbstractVector{T},\n)::Nothing where {T}\n\nGiven scalar weight σ and vector of constraint weights μ, computes the Hessian-of-the-Lagrangian-vector product h = left(sigmanabla^2 f(x) + sum_i=1^m mu_i nabla^2 g_i(x)right)v, storing the result in the vector h.\n\nThe vectors have dimensions such that length(h) == length(x) == length(v).\n\nImplementation notes\n\nWhen implementing this method, you must not assume that h is Vector{Float64}, but you may assume that it supports setindex! and length. For example, it may be the view of a vector.\n\nInitialize\n\nBefore calling this function, you must call initialize with :HessVec.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true, true);\n\njulia> MOI.initialize(evaluator, Symbol[:HessVec])\n\njulia> H = fill(NaN, 4);\n\njulia> x = [1.0, 2.0, 3.0, 4.0];\n\njulia> v = [1.5, 2.5, 3.5, 4.5];\n\njulia> σ = 1.0;\n\njulia> μ = [1.0, 1.0];\n\njulia> MOI.eval_hessian_lagrangian_product(evaluator, H, x, v, σ, μ)\n\njulia> H\n4-element Vector{Float64}:\n 155.5\n 61.0\n 48.5\n 49.0\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.objective_expr","page":"Nonlinear programming","title":"MathOptInterface.objective_expr","text":"objective_expr(d::AbstractNLPEvaluator)::Expr\n\nReturns a Julia Expr object representing the expression graph of the objective function.\n\nFormat\n\nThe expression has a number of limitations, compared with arbitrary Julia expressions:\n\nAll sums and products are flattened out as simple Expr(:+, ...) and Expr(:*, ...) 
objects.\nAll decision variables must be of the form Expr(:ref, :x, MOI.VariableIndex(i)), where i is the ith variable in ListOfVariableIndices.\nThere are currently no restrictions on recognized functions; typically these will be built-in Julia functions like ^, exp, log, cos, tan, sqrt, etc., but modeling interfaces may choose to extend these basic functions, or error if they encounter unsupported functions.\n\nInitialize\n\nBefore calling this function, you must call initialize with :ExprGraph.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, [:ExprGraph])\n\njulia> MOI.objective_expr(evaluator)\n:(x[MOI.VariableIndex(1)] * x[MOI.VariableIndex(4)] * (x[MOI.VariableIndex(1)] + x[MOI.VariableIndex(2)] + x[MOI.VariableIndex(3)]) + x[MOI.VariableIndex(3)])\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/nonlinear/#MathOptInterface.constraint_expr","page":"Nonlinear programming","title":"MathOptInterface.constraint_expr","text":"constraint_expr(d::AbstractNLPEvaluator, i::Integer)::Expr\n\nReturns a Julia Expr object representing the expression graph for the itextth nonlinear constraint.\n\nFormat\n\nThe format is the same as objective_expr, with an additional comparison operator indicating the sense of and bounds on the constraint.\n\nFor single-sided comparisons, the body of the constraint must be on the left-hand side, and the right-hand side must be a constant.\n\nFor double-sided comparisons (that is, l le g(x) le u), the body of the constraint must be in the middle, and the left- and right-hand sides must be constants.\n\nThe bounds on the constraints must match the NLPBoundsPairs passed to NLPBlockData.\n\nInitialize\n\nBefore calling this function, you must call initialize with :ExprGraph.\n\nExample\n\nThis example uses the Test.HS071 evaluator.\n\njulia> import MathOptInterface as MOI\n\njulia> evaluator = MOI.Test.HS071(true);\n\njulia> MOI.initialize(evaluator, [:ExprGraph])\n\njulia> MOI.constraint_expr(evaluator, 1)\n:(x[MOI.VariableIndex(1)] * x[MOI.VariableIndex(2)] * x[MOI.VariableIndex(3)] * x[MOI.VariableIndex(4)] >= 25.0)\n\njulia> MOI.constraint_expr(evaluator, 2)\n:(x[MOI.VariableIndex(1)] ^ 2 + x[MOI.VariableIndex(2)] ^ 2 + x[MOI.VariableIndex(3)] ^ 2 + x[MOI.VariableIndex(4)] ^ 2 == 40.0)\n\n\n\n\n\n","category":"function"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"EditURL = \"https://github.com/jump-dev/Cbc.jl/blob/v1.2.0/README.md\"","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"(Image: )","category":"page"},{"location":"packages/Cbc/#Cbc.jl","page":"jump-dev/Cbc.jl","title":"Cbc.jl","text":"","category":"section"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"(Image: Build Status) (Image: codecov)","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"Cbc.jl is a wrapper for the COIN-OR Branch and Cut (Cbc) solver.","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"The wrapper has two components:","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"a thin wrapper around the complete C API\nan interface to 
MathOptInterface","category":"page"},{"location":"packages/Cbc/#Affiliation","page":"jump-dev/Cbc.jl","title":"Affiliation","text":"","category":"section"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"This wrapper is maintained by the JuMP community and is not a COIN-OR project.","category":"page"},{"location":"packages/Cbc/#License","page":"jump-dev/Cbc.jl","title":"License","text":"","category":"section"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"Cbc.jl is licensed under the MIT License.","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"The underlying solver, coin-or/Cbc, is licensed under the Eclipse public license.","category":"page"},{"location":"packages/Cbc/#Installation","page":"jump-dev/Cbc.jl","title":"Installation","text":"","category":"section"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"Install Cbc using Pkg.add:","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"import Pkg\nPkg.add(\"Cbc\")","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"In addition to installing the Cbc.jl package, this will also download and install the Cbc binaries. You do not need to install Cbc separately.","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"To use a custom binary, read the Custom solver binaries section of the JuMP documentation.","category":"page"},{"location":"packages/Cbc/#Use-with-JuMP","page":"jump-dev/Cbc.jl","title":"Use with JuMP","text":"","category":"section"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"To use Cbc with JuMP, use Cbc.Optimizer:","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"using JuMP, Cbc\nmodel = Model(Cbc.Optimizer)\nset_attribute(model, \"logLevel\", 1)","category":"page"},{"location":"packages/Cbc/#MathOptInterface-API","page":"jump-dev/Cbc.jl","title":"MathOptInterface API","text":"","category":"section"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"The COIN Branch-and-Cut (Cbc) optimizer supports the following constraints and attributes.","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"List of supported objective functions:","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"List of supported variable types:","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"MOI.Reals","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"List of supported constraint types:","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"MOI.ScalarAffineFunction{Float64} in MOI.EqualTo{Float64}\nMOI.ScalarAffineFunction{Float64} in MOI.GreaterThan{Float64}\nMOI.ScalarAffineFunction{Float64} in MOI.Interval{Float64}\nMOI.ScalarAffineFunction{Float64} in MOI.LessThan{Float64}\nMOI.VariableIndex in MOI.EqualTo{Float64}\nMOI.VariableIndex in 
MOI.GreaterThan{Float64}\nMOI.VariableIndex in MOI.Integer\nMOI.VariableIndex in MOI.Interval{Float64}\nMOI.VariableIndex in MOI.LessThan{Float64}\nMOI.VariableIndex in MOI.ZeroOne\nMOI.VectorOfVariables in MOI.SOS1{Float64}\nMOI.VectorOfVariables in MOI.SOS2{Float64}","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"List of supported model attributes:","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"Cbc.Status\nCbc.SecondaryStatus\nMOI.DualStatus\nMOI.NodeCount\nMOI.NumberOfVariables\nMOI.ObjectiveBound\nMOI.ObjectiveSense\nMOI.ObjectiveValue\nMOI.PrimalStatus\nMOI.RelativeGap\nMOI.ResultCount\nMOI.SolveTimeSec\nMOI.TerminationStatus","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"List of supported optimizer attributes:","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"Cbc.SetVariableNames\nMOI.AbsoluteGapTolerance\nMOI.NumberOfThreads\nMOI.RawOptimizerAttribute\nMOI.RelativeGapTolerance\nMOI.Silent\nMOI.SolverName\nMOI.SolverVersion\nMOI.TimeLimitSec","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"List of supported variable attributes:","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"MOI.VariablePrimal\nMOI.VariablePrimalStart\nMOI.VariableName","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"List of supported constraint attributes:","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"MOI.ConstraintPrimal","category":"page"},{"location":"packages/Cbc/#Options","page":"jump-dev/Cbc.jl","title":"Options","text":"","category":"section"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"Options are, unfortunately, not well documented.","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"The following options are likely to be the most useful:","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"Parameter Example Explanation\nseconds 60.0 Solution timeout limit\nlogLevel 2 Set to 0 to disable solution output\nmaxSolutions 1 Terminate after this many feasible solutions have been found\nmaxNodes 1 Terminate after this many branch-and-bound nodes have been evaluated\nallowableGap 0.05 Terminate after optimality gap is less than this value (on an absolute scale)\nratioGap 0.05 Terminate after optimality gap is smaller than this relative fraction\nthreads 1 Set the number of threads to use for parallel branch & bound","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"The complete list of parameters can be found by running the cbc executable and typing ? 
at the prompt.","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"Start the cbc executable from Julia as follows:","category":"page"},{"location":"packages/Cbc/","page":"jump-dev/Cbc.jl","title":"jump-dev/Cbc.jl","text":"using Cbc_jll\nCbc_jll.cbc() do exe\n run(`$(exe)`)\nend","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"EditURL = \"geographic_clustering.jl\"","category":"page"},{"location":"tutorials/linear/geographic_clustering/#Geographical-clustering","page":"Geographical clustering","title":"Geographical clustering","text":"","category":"section"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"This tutorial was originally contributed by Matthew Helm and Mathieu Tanneau.","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"The goal of this exercise is to cluster n cities into k groups, minimizing the total pairwise distance between cities and ensuring that the variance in the total populations of each group is relatively small.","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"This tutorial uses the following packages:","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"using JuMP\nimport DataFrames\nimport HiGHS\nimport LinearAlgebra","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"For this example, we'll use the 20 most populous cities in the United States.","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"cities = DataFrames.DataFrame(\n Union{String,Float64}[\n \"New York, NY\" 8.405 40.7127 -74.0059\n \"Los Angeles, CA\" 3.884 34.0522 -118.2436\n \"Chicago, IL\" 2.718 41.8781 -87.6297\n \"Houston, TX\" 2.195 29.7604 -95.3698\n \"Philadelphia, PA\" 1.553 39.9525 -75.1652\n \"Phoenix, AZ\" 1.513 33.4483 -112.0740\n \"San Antonio, TX\" 1.409 29.4241 -98.4936\n \"San Diego, CA\" 1.355 32.7157 -117.1610\n \"Dallas, TX\" 1.257 32.7766 -96.7969\n \"San Jose, CA\" 0.998 37.3382 -121.8863\n \"Austin, TX\" 0.885 30.2671 -97.7430\n \"Indianapolis, IN\" 0.843 39.7684 -86.1580\n \"Jacksonville, FL\" 0.842 30.3321 -81.6556\n \"San Francisco, CA\" 0.837 37.7749 -122.4194\n \"Columbus, OH\" 0.822 39.9611 -82.9987\n \"Charlotte, NC\" 0.792 35.2270 -80.8431\n \"Fort Worth, TX\" 0.792 32.7554 -97.3307\n \"Detroit, MI\" 0.688 42.3314 -83.0457\n \"El Paso, TX\" 0.674 31.7775 -106.4424\n \"Memphis, TN\" 0.653 35.1495 -90.0489\n ],\n [\"city\", \"population\", \"lat\", \"lon\"],\n)","category":"page"},{"location":"tutorials/linear/geographic_clustering/#Model-Specifics","page":"Geographical clustering","title":"Model Specifics","text":"","category":"section"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical 
clustering","title":"Geographical clustering","text":"We will cluster these 20 cities into 3 different groups and we will assume that the ideal or target population P for a group is simply the total population of the 20 cities divided by 3:","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"n = size(cities, 1)\nk = 3\nP = sum(cities.population) / k","category":"page"},{"location":"tutorials/linear/geographic_clustering/#Obtaining-the-distances-between-each-city","page":"Geographical clustering","title":"Obtaining the distances between each city","text":"","category":"section"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"Let's compute the pairwise Haversine distance between each of the cities in our data set and store the result in a variable we'll call dm:","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"\"\"\"\n haversine(lat1, long1, lat2, long2, r = 6372.8)\n\nCompute the haversine distance between two points on a sphere of radius `r`,\nwhere the points are given by the latitude/longitude pairs `lat1/long1` and\n`lat2/long2` (in degrees).\n\"\"\"\nfunction haversine(lat1, long1, lat2, long2, r = 6372.8)\n lat1, long1 = deg2rad(lat1), deg2rad(long1)\n lat2, long2 = deg2rad(lat2), deg2rad(long2)\n hav(a, b) = sin((b - a) / 2)^2\n inner_term = hav(lat1, lat2) + cos(lat1) * cos(lat2) * hav(long1, long2)\n d = 2 * r * asin(sqrt(inner_term))\n # Round distance to nearest kilometer.\n return round(Int, d)\nend","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"Our distance matrix is symmetric so we'll convert it to a LowerTriangular matrix so that we can better interpret the objective value of our model:","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"dm = LinearAlgebra.LowerTriangular([\n haversine(cities.lat[i], cities.lon[i], cities.lat[j], cities.lon[j])\n for i in 1:n, j in 1:n\n])","category":"page"},{"location":"tutorials/linear/geographic_clustering/#Build-the-model","page":"Geographical clustering","title":"Build the model","text":"","category":"section"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"Now that we have the basics taken care of, we can set up our model, create decision variables, add constraints, and then solve.","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"First, we'll set up a model that leverages the Cbc solver. Next, we'll set up a binary variable x_ik that takes the value 1 if city i is in group k and 0 otherwise. 
Each city must be in a group, so we'll add the constraint sum_k x_ik = 1 for every i.","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"model = Model(HiGHS.Optimizer)\nset_silent(model)\n@variable(model, x[1:n, 1:k], Bin)\n@constraint(model, [i = 1:n], sum(x[i, :]) == 1);\n# To reduce symmetry, we fix the first city to belong to the first group.\nfix(x[1, 1], 1; force = true)","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"The total population of a group k is Q_k = sum_ix_ikq_i where q_i is simply the i-th value from the population column in our cities DataFrame. Let's add constraints so that alpha leq (Q_k - P) leq beta. We'll set alpha equal to -3 million and beta equal to 3. By adjusting these thresholds you'll find that there is a tradeoff between having relatively even populations between groups and having geographically close cities within each group. In other words, the larger the absolute values of alpha and beta, the closer together the cities in a group will be but the variance between the group populations will be higher.","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"@variable(model, -3 <= population_diff[1:k] <= 3)\n@constraint(model, population_diff .== x' * cities.population .- P)","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"Now we need to add one last binary variable z_ij to our model that we'll use to compute the total distance between the cities in our groups, defined as sum_ijd_ijz_ij. Variable z_ij will equal 1 if cities i and j are in the same group, and 0 if they are not in the same group.","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"To ensure that z_ij = 1 if and only if cities i and j are in the same group, we add the constraints z_ij geq x_ik + x_jk - 1 for every pair ij and every k:","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"@variable(model, z[i = 1:n, j = 1:i], Bin)\nfor k in 1:k, i in 1:n, j in 1:i\n @constraint(model, z[i, j] >= x[i, k] + x[j, k] - 1)\nend","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"We can now add an objective to our model which will simply be to minimize the dot product of z and our distance matrix, dm.","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"@objective(model, Min, sum(dm[i, j] * z[i, j] for i in 1:n, j in 1:i));\nnothing #hide","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"We can then call optimize! 
and review the results.","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"optimize!(model)\n@assert is_solved_and_feasible(model)","category":"page"},{"location":"tutorials/linear/geographic_clustering/#Reviewing-the-Results","page":"Geographical clustering","title":"Reviewing the Results","text":"","category":"section"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"Now that we have results, we can add a column to our cities DataFrame for the group and then loop through our x variable to assign each city to its group. Once we have that, we can look at the total population for each group and also look at the cities in each group to verify that they are grouped by geographic proximity.","category":"page"},{"location":"tutorials/linear/geographic_clustering/","page":"Geographical clustering","title":"Geographical clustering","text":"cities.group = zeros(n)\n\nfor i in 1:n, j in 1:k\n if round(Int, value(x[i, j])) == 1\n cities.group[i] = j\n end\nend\n\nfor group in DataFrames.groupby(cities, :group)\n @show group\n println(\"\")\n @show sum(group.population)\n println(\"\")\nend","category":"page"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"EditURL = \"https://github.com/hdavid16/DisjunctiveProgramming.jl/blob/6a4d0ac4a7484e52b1dbff9cee310a73d2d23e81/docs/jump/README.md\"","category":"page"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"(Image: Logo)","category":"page"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"DisjunctiveProgramming.jl is a JuMP extension for expressing and solving Generalized Disjunctive Programs. Generalized Disjunctive Programming (GDP) is a modeling paradigm for easily modeling logical conditions which can be reformulated into a variety of mixed-integer programs. 
","category":"page"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"Current Version Documentation Build Status Citation\n(Image: ) (Image: ) (Image: Build Status) (Image: codecov.io) (Image: arXiv)","category":"page"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"DisjunctiveProgramming builds upon JuMP to add support GDP modeling objects which include:","category":"page"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"Logical variables (Y in textFalse textTrue)\nDisjunctions\nLogical constraints (also known as propositions)\nCardinality constraints","category":"page"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"It also supports automatic conversion of the GDP model into a regular mixed-integer JuMP model via a variety of reformulations which include:","category":"page"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"Big-M\nHull\nIndicator constraints","category":"page"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"Moreover, DisjunctiveProgramming provides an extension API to easily add new reformulation methods.","category":"page"},{"location":"packages/DisjunctiveProgramming/#License","page":"hdavid16/DisjunctiveProgramming.jl","title":"License","text":"","category":"section"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"InfiniteOpt is licensed under the MIT \"Expat\" license.","category":"page"},{"location":"packages/DisjunctiveProgramming/#Installation","page":"hdavid16/DisjunctiveProgramming.jl","title":"Installation","text":"","category":"section"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"DisjunctiveProgramming.jl is a registered Julia package and can be installed by entering the following in the REPL.","category":"page"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"julia> import Pkg; Pkg.add(\"DisjunctiveProgramming\")","category":"page"},{"location":"packages/DisjunctiveProgramming/#Documentation","page":"hdavid16/DisjunctiveProgramming.jl","title":"Documentation","text":"","category":"section"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"Please visit our documentation pages to learn more.","category":"page"},{"location":"packages/DisjunctiveProgramming/#Citing","page":"hdavid16/DisjunctiveProgramming.jl","title":"Citing","text":"","category":"section"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"(Image: arXiv)","category":"page"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"If you use DisjunctiveProgramming.jl in 
your research, we would greatly appreciate your citing it.","category":"page"},{"location":"packages/DisjunctiveProgramming/","page":"hdavid16/DisjunctiveProgramming.jl","title":"hdavid16/DisjunctiveProgramming.jl","text":"@article{perez2023disjunctiveprogramming,\n title={DisjunctiveProgramming. jl: Generalized Disjunctive Programming Models and Algorithms for JuMP},\n author={Perez, Hector D and Joshi, Shivank and Grossmann, Ignacio E},\n journal={arXiv preprint arXiv:2304.10492},\n year={2023}\n}","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"CurrentModule = JuMP\nDocTestSetup = quote\n using JuMP\nend\nDocTestFilters = [r\"≤|<=\", r\"≥|>=\", r\" == | = \", r\" ∈ | in \", r\"MathOptInterface|MOI\"]","category":"page"},{"location":"manual/objective/#Objectives","page":"Objectives","title":"Objectives","text":"","category":"section"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"This page describes macros and functions related to linear and quadratic objective functions only, unless otherwise indicated. For nonlinear objective functions, see Nonlinear Modeling.","category":"page"},{"location":"manual/objective/#Set-a-linear-objective","page":"Objectives","title":"Set a linear objective","text":"","category":"section"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Use the @objective macro to set a linear objective function.","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Use Min to create a minimization objective:","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @objective(model, Min, 2x + 1)\n2 x + 1","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Use Max to create a maximization objective:","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @objective(model, Max, 2x + 1)\n2 x + 1","category":"page"},{"location":"manual/objective/#Set-a-quadratic-objective","page":"Objectives","title":"Set a quadratic objective","text":"","category":"section"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Use the @objective macro to set a quadratic objective function.","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Use ^2 to have a variable squared:","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @objective(model, Min, x^2 + 2x + 1)\nx² + 2 x + 1","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"You can also have bilinear terms between variables:","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @variable(model, y)\ny\n\njulia> @objective(model, Max, x * y + x + y)\nx*y + x + y","category":"page"},{"location":"manual/objective/#Set-a-nonlinear-objective","page":"Objectives","title":"Set a nonlinear objective","text":"","category":"section"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Use the @objective macro to set a nonlinear objective 
function:","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x <= 1);\n\njulia> @objective(model, Max, log(x))\nlog(x)","category":"page"},{"location":"manual/objective/#Query-the-objective-function","page":"Objectives","title":"Query the objective function","text":"","category":"section"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Use objective_function to return the current objective function.","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @objective(model, Min, 2x + 1)\n2 x + 1\n\njulia> objective_function(model)\n2 x + 1","category":"page"},{"location":"manual/objective/#Evaluate-the-objective-function-at-a-point","page":"Objectives","title":"Evaluate the objective function at a point","text":"","category":"section"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Use value to evaluate an objective function at a point specifying values for variables.","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x[1:2]);\n\njulia> @objective(model, Min, 2x[1]^2 + x[1] + 0.5*x[2])\n2 x[1]² + x[1] + 0.5 x[2]\n\njulia> f = objective_function(model)\n2 x[1]² + x[1] + 0.5 x[2]\n\njulia> point = Dict(x[1] => 2.0, x[2] => 1.0);\n\njulia> value(z -> point[z], f)\n10.5","category":"page"},{"location":"manual/objective/#Query-the-objective-sense","page":"Objectives","title":"Query the objective sense","text":"","category":"section"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Use objective_sense to return the current objective sense.","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @objective(model, Min, 2x + 1)\n2 x + 1\n\njulia> objective_sense(model)\nMIN_SENSE::OptimizationSense = 0","category":"page"},{"location":"manual/objective/#Modify-an-objective","page":"Objectives","title":"Modify an objective","text":"","category":"section"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"To modify an objective, call @objective with the new objective function.","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @objective(model, Min, 2x)\n2 x\n\njulia> @objective(model, Max, -2x)\n-2 x","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Alternatively, use set_objective_function.","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @objective(model, Min, 2x)\n2 x\n\njulia> new_objective = @expression(model, -2 * x)\n-2 x\n\njulia> set_objective_function(model, new_objective)","category":"page"},{"location":"manual/objective/#Modify-an-objective-coefficient","page":"Objectives","title":"Modify an objective coefficient","text":"","category":"section"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Use set_objective_coefficient to modify an objective 
coefficient.","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @objective(model, Min, 2x)\n2 x\n\njulia> set_objective_coefficient(model, x, 3)\n\njulia> objective_function(model)\n3 x","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Use set_objective_coefficient with two variables to modify a quadratic objective coefficient:","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @variable(model, y);\n\njulia> @objective(model, Min, x^2 + x * y)\nx² + x*y\n\njulia> set_objective_coefficient(model, x, x, 2)\n\njulia> set_objective_coefficient(model, x, y, 3)\n\njulia> objective_function(model)\n2 x² + 3 x*y","category":"page"},{"location":"manual/objective/#Modify-the-objective-sense","page":"Objectives","title":"Modify the objective sense","text":"","category":"section"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Use set_objective_sense to modify the objective sense.","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @objective(model, Min, 2x)\n2 x\n\njulia> objective_sense(model)\nMIN_SENSE::OptimizationSense = 0\n\njulia> set_objective_sense(model, MAX_SENSE);\n\njulia> objective_sense(model)\nMAX_SENSE::OptimizationSense = 1","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Alternatively, call @objective and pass the existing objective function.","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @objective(model, Min, 2x)\n2 x\n\njulia> @objective(model, Max, objective_function(model))\n2 x","category":"page"},{"location":"manual/objective/#Set-a-vector-valued-objective","page":"Objectives","title":"Set a vector-valued objective","text":"","category":"section"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Define a multi-objective optimization problem by passing a vector of objectives:","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x[1:2]);\n\njulia> @objective(model, Min, [1 + x[1], 2 * x[2]])\n2-element Vector{AffExpr}:\n x[1] + 1\n 2 x[2]\n\njulia> f = objective_function(model)\n2-element Vector{AffExpr}:\n x[1] + 1\n 2 x[2]","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"tip: Tip\nThe Multi-objective knapsack tutorial provides an example of solving a multi-objective integer program.","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"In most cases, multi-objective optimization solvers will return multiple solutions, corresponding to points on the Pareto frontier. See Multiple solutions for information on how to query and work with multiple solutions.","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Note that you must set a single objective sense, that is, you cannot have both minimization and maximization objectives. 
Work around this limitation by choosing Min and negating any objectives you want to maximize:","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x[1:2]);\n\njulia> @expression(model, obj1, 1 + x[1])\nx[1] + 1\n\njulia> @expression(model, obj2, 2 * x[1])\n2 x[1]\n\njulia> @objective(model, Min, [obj1, -obj2])\n2-element Vector{AffExpr}:\n x[1] + 1\n -2 x[1]","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"Defining your objectives as expressions allows flexibility in how you can solve variations of the same problem, with some objectives removed and constrained to be no worse than a fixed value.","category":"page"},{"location":"manual/objective/","page":"Objectives","title":"Objectives","text":"julia> model = Model();\n\njulia> @variable(model, x[1:2]);\n\njulia> @expression(model, obj1, 1 + x[1])\nx[1] + 1\n\njulia> @expression(model, obj2, 2 * x[1])\n2 x[1]\n\njulia> @expression(model, obj3, x[1] + x[2])\nx[1] + x[2]\n\njulia> @objective(model, Min, [obj1, obj2, obj3]) # Three-objective problem\n3-element Vector{AffExpr}:\n x[1] + 1\n 2 x[1]\n x[1] + x[2]\n\njulia> # optimize!(model), look at the solution, talk to stakeholders, then\n # decide you want to solve a new problem where the third objective is\n # removed and constrained to be better than 2.0.\n nothing\n\njulia> @objective(model, Min, [obj1, obj2]) # Two-objective problem\n2-element Vector{AffExpr}:\n x[1] + 1\n 2 x[1]\n\njulia> @constraint(model, obj3 <= 2.0)\nx[1] + x[2] ≤ 2","category":"page"},{"location":"moi/reference/callbacks/","page":"Callbacks","title":"Callbacks","text":"EditURL = \"https://github.com/jump-dev/MathOptInterface.jl/blob/v1.34.0/docs/src/reference/callbacks.md\"","category":"page"},{"location":"moi/reference/callbacks/","page":"Callbacks","title":"Callbacks","text":"CurrentModule = MathOptInterface\nDocTestSetup = quote\n import MathOptInterface as MOI\nend\nDocTestFilters = [r\"MathOptInterface|MOI\"]","category":"page"},{"location":"moi/reference/callbacks/#Callbacks","page":"Callbacks","title":"Callbacks","text":"","category":"section"},{"location":"moi/reference/callbacks/","page":"Callbacks","title":"Callbacks","text":"AbstractCallback\nAbstractSubmittable\nsubmit","category":"page"},{"location":"moi/reference/callbacks/#MathOptInterface.AbstractCallback","page":"Callbacks","title":"MathOptInterface.AbstractCallback","text":"abstract type AbstractCallback <: AbstractModelAttribute end\n\nAbstract type for a model attribute representing a callback function. The value set to subtypes of AbstractCallback is a function that may be called during optimize!. As optimize! is in progress, the result attributes (that is, the attributes attr such that is_set_by_optimize(attr)) may not be accessible from the callback, hence trying to get result attributes might throw an OptimizeInProgress error.\n\nAt most one callback of each type can be registered. 
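For illustration, here is a minimal sketch of registering one such callback and querying the callback attributes documented below. It assumes an existing optimizer that supports MOI.LazyConstraintCallback; the submitted constraint is purely illustrative and not part of any solver's API.

import MathOptInterface as MOI

# `optimizer` is assumed to be an AbstractOptimizer that supports lazy constraints.
x = MOI.add_variables(optimizer, 8)

# Register the callback: the attribute value is a one-argument function that
# receives the solver-specific callback_data.
MOI.set(optimizer, MOI.LazyConstraintCallback(), callback_data -> begin
    # Only callback attributes may be queried here; result attributes are not
    # available while optimize! is in progress.
    status = MOI.get(optimizer, MOI.CallbackNodeStatus(callback_data))
    sol = MOI.get(optimizer, MOI.CallbackVariablePrimal(callback_data), x)
    if status == MOI.CALLBACK_NODE_STATUS_INTEGER && sol[1] + sol[2] > 1.0
        # Illustrative cut: x[1] + x[2] <= 1 removes this incumbent.
        f = 1.0 * x[1] + 1.0 * x[2]
        MOI.submit(optimizer, MOI.LazyConstraint(callback_data), f, MOI.LessThan(1.0))
    end
    return
end)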
If an optimizer already has a function for a callback type, and the user registers a new function, then the old one is replaced.\n\nThe value of the attribute should be a function taking only one argument, commonly called callback_data, that can be used for instance in LazyConstraintCallback, HeuristicCallback and UserCutCallback.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/callbacks/#MathOptInterface.AbstractSubmittable","page":"Callbacks","title":"MathOptInterface.AbstractSubmittable","text":"AbstractSubmittable\n\nAbstract supertype for objects that can be submitted to the model.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/callbacks/#MathOptInterface.submit","page":"Callbacks","title":"MathOptInterface.submit","text":"submit(\n optimizer::AbstractOptimizer,\n sub::AbstractSubmittable,\n values...,\n)::Nothing\n\nSubmit values to the submittable sub of the optimizer optimizer.\n\nAn UnsupportedSubmittable error is thrown if model does not support the attribute attr (see supports) and a SubmitNotAllowed error is thrown if it supports the submittable sub but it cannot be submitted.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/callbacks/#Attributes","page":"Callbacks","title":"Attributes","text":"","category":"section"},{"location":"moi/reference/callbacks/","page":"Callbacks","title":"Callbacks","text":"CallbackNodeStatus\nCallbackVariablePrimal\nCallbackNodeStatusCode\nCALLBACK_NODE_STATUS_INTEGER\nCALLBACK_NODE_STATUS_FRACTIONAL\nCALLBACK_NODE_STATUS_UNKNOWN","category":"page"},{"location":"moi/reference/callbacks/#MathOptInterface.CallbackNodeStatus","page":"Callbacks","title":"MathOptInterface.CallbackNodeStatus","text":"CallbackNodeStatus(callback_data)\n\nAn optimizer attribute describing the (in)feasibility of the primal solution available from CallbackVariablePrimal during a callback identified by callback_data.\n\nReturns a CallbackNodeStatusCode Enum.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/callbacks/#MathOptInterface.CallbackVariablePrimal","page":"Callbacks","title":"MathOptInterface.CallbackVariablePrimal","text":"CallbackVariablePrimal(callback_data)\n\nA variable attribute for the assignment to some primal variable's value during the callback identified by callback_data.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/callbacks/#MathOptInterface.CallbackNodeStatusCode","page":"Callbacks","title":"MathOptInterface.CallbackNodeStatusCode","text":"CallbackNodeStatusCode\n\nAn Enum of possible return values from calling get with CallbackNodeStatus.\n\nValues\n\nPossible values are:\n\nCALLBACK_NODE_STATUS_INTEGER: the primal solution available from CallbackVariablePrimal is integer feasible.\nCALLBACK_NODE_STATUS_FRACTIONAL: the primal solution available from CallbackVariablePrimal is integer infeasible.\nCALLBACK_NODE_STATUS_UNKNOWN: the primal solution available from CallbackVariablePrimal might be integer feasible or infeasible.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/callbacks/#MathOptInterface.CALLBACK_NODE_STATUS_INTEGER","page":"Callbacks","title":"MathOptInterface.CALLBACK_NODE_STATUS_INTEGER","text":"CALLBACK_NODE_STATUS_INTEGER::CallbackNodeStatusCode\n\nAn instance of the CallbackNodeStatusCode enum.\n\nCALLBACK_NODE_STATUS_INTEGER: the primal solution available from CallbackVariablePrimal is integer 
feasible.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/callbacks/#MathOptInterface.CALLBACK_NODE_STATUS_FRACTIONAL","page":"Callbacks","title":"MathOptInterface.CALLBACK_NODE_STATUS_FRACTIONAL","text":"CALLBACK_NODE_STATUS_FRACTIONAL::CallbackNodeStatusCode\n\nAn instance of the CallbackNodeStatusCode enum.\n\nCALLBACK_NODE_STATUS_FRACTIONAL: the primal solution available from CallbackVariablePrimal is integer infeasible.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/callbacks/#MathOptInterface.CALLBACK_NODE_STATUS_UNKNOWN","page":"Callbacks","title":"MathOptInterface.CALLBACK_NODE_STATUS_UNKNOWN","text":"CALLBACK_NODE_STATUS_UNKNOWN::CallbackNodeStatusCode\n\nAn instance of the CallbackNodeStatusCode enum.\n\nCALLBACK_NODE_STATUS_UNKNOWN: the primal solution available from CallbackVariablePrimal might be integer feasible or infeasible.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/callbacks/#Lazy-constraints","page":"Callbacks","title":"Lazy constraints","text":"","category":"section"},{"location":"moi/reference/callbacks/","page":"Callbacks","title":"Callbacks","text":"LazyConstraintCallback\nLazyConstraint","category":"page"},{"location":"moi/reference/callbacks/#MathOptInterface.LazyConstraintCallback","page":"Callbacks","title":"MathOptInterface.LazyConstraintCallback","text":"LazyConstraintCallback() <: AbstractCallback\n\nThe callback can be used to reduce the feasible set given the current primal solution by submitting a LazyConstraint. For instance, it may be called at an incumbent of a mixed-integer problem. Note that there is no guarantee that the callback is called at every feasible primal solution.\n\nThe current primal solution is accessed through CallbackVariablePrimal. Trying to access other result attributes will throw OptimizeInProgress as discussed in AbstractCallback.\n\nExample\n\nx = MOI.add_variables(optimizer, 8)\nMOI.set(optimizer, MOI.LazyConstraintCallback(), callback_data -> begin\n sol = MOI.get(optimizer, MOI.CallbackVariablePrimal(callback_data), x)\n if # should add a lazy constraint\n func = # computes function\n set = # computes set\n MOI.submit(optimizer, MOI.LazyConstraint(callback_data), func, set)\n end\nend)\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/callbacks/#MathOptInterface.LazyConstraint","page":"Callbacks","title":"MathOptInterface.LazyConstraint","text":"LazyConstraint(callback_data)\n\nLazy constraint func-in-set submitted as func, set. The optimal solution returned by VariablePrimal will satisfy all lazy constraints that have been submitted.\n\nThis can be submitted only from the LazyConstraintCallback. The field callback_data is a solver-specific callback type that is passed as the argument to the feasible solution callback.\n\nExample\n\nSuppose x and y are VariableIndexs of optimizer. 
To add a LazyConstraint for 2x + 3y <= 1, write\n\nfunc = 2.0x + 3.0y\nset = MOI.LessThan(1.0)\nMOI.submit(optimizer, MOI.LazyConstraint(callback_data), func, set)\n\ninside a LazyConstraintCallback of data callback_data.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/callbacks/#User-cuts","page":"Callbacks","title":"User cuts","text":"","category":"section"},{"location":"moi/reference/callbacks/","page":"Callbacks","title":"Callbacks","text":"UserCutCallback\nUserCut","category":"page"},{"location":"moi/reference/callbacks/#MathOptInterface.UserCutCallback","page":"Callbacks","title":"MathOptInterface.UserCutCallback","text":"UserCutCallback() <: AbstractCallback\n\nThe callback can be used to submit UserCut given the current primal solution. For instance, it may be called at fractional (that is, non-integer) nodes in the branch and bound tree of a mixed-integer problem. Note that there is no guarantee that the callback is called every time the solver has an infeasible solution.\n\nThe infeasible solution is accessed through CallbackVariablePrimal. Trying to access other result attributes will throw OptimizeInProgress as discussed in AbstractCallback.\n\nExample\n\nx = MOI.add_variables(optimizer, 8)\nMOI.set(optimizer, MOI.UserCutCallback(), callback_data -> begin\n sol = MOI.get(optimizer, MOI.CallbackVariablePrimal(callback_data), x)\n if # can find a user cut\n func = # computes function\n set = # computes set\n MOI.submit(optimizer, MOI.UserCut(callback_data), func, set)\n end\nend)\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/callbacks/#MathOptInterface.UserCut","page":"Callbacks","title":"MathOptInterface.UserCut","text":"UserCut(callback_data)\n\nConstraint func-to-set suggested to help the solver detect the solution given by CallbackVariablePrimal as infeasible. The cut is submitted as func, set. Typically CallbackVariablePrimal will violate integrality constraints, and a cut would be of the form ScalarAffineFunction-in-LessThan or ScalarAffineFunction-in-GreaterThan. Note that, as opposed to LazyConstraint, the provided constraint cannot modify the feasible set, the constraint should be redundant, for example, it may be a consequence of affine and integrality constraints.\n\nThis can be submitted only from the UserCutCallback. The field callback_data is a solver-specific callback type that is passed as the argument to the infeasible solution callback.\n\nNote that the solver may silently ignore the provided constraint.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/callbacks/#Heuristic-solutions","page":"Callbacks","title":"Heuristic solutions","text":"","category":"section"},{"location":"moi/reference/callbacks/","page":"Callbacks","title":"Callbacks","text":"HeuristicCallback\nHeuristicSolution\nHeuristicSolutionStatus\nHEURISTIC_SOLUTION_ACCEPTED\nHEURISTIC_SOLUTION_REJECTED\nHEURISTIC_SOLUTION_UNKNOWN","category":"page"},{"location":"moi/reference/callbacks/#MathOptInterface.HeuristicCallback","page":"Callbacks","title":"MathOptInterface.HeuristicCallback","text":"HeuristicCallback() <: AbstractCallback\n\nThe callback can be used to submit HeuristicSolution given the current primal solution. For example, it may be called at fractional (that is, non-integer) nodes in the branch and bound tree of a mixed-integer problem. Note that there is no guarantee that the callback is called every time the solver has an infeasible solution.\n\nThe current primal solution is accessed through CallbackVariablePrimal. 
Trying to access other result attributes will throw OptimizeInProgress as discussed in AbstractCallback.\n\nExample\n\nx = MOI.add_variables(optimizer, 8)\nMOI.set(optimizer, MOI.HeuristicCallback(), callback_data -> begin\n sol = MOI.get(optimizer, MOI.CallbackVariablePrimal(callback_data), x)\n if # can find a heuristic solution\n values = # computes heuristic solution\n MOI.submit(optimizer, MOI.HeuristicSolution(callback_data), x,\n values)\n end\nend\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/callbacks/#MathOptInterface.HeuristicSolution","page":"Callbacks","title":"MathOptInterface.HeuristicSolution","text":"HeuristicSolution(callback_data)\n\nHeuristically obtained feasible solution. The solution is submitted as variables, values where values[i] gives the value of variables[i], similarly to set. The submit call returns a HeuristicSolutionStatus indicating whether the provided solution was accepted or rejected.\n\nThis can be submitted only from the HeuristicCallback. The field callback_data is a solver-specific callback type that is passed as the argument to the heuristic callback.\n\nSome solvers require a complete solution, others only partial solutions.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/callbacks/#MathOptInterface.HeuristicSolutionStatus","page":"Callbacks","title":"MathOptInterface.HeuristicSolutionStatus","text":"HeuristicSolutionStatus\n\nAn Enum of possible return values for submit with HeuristicSolution. This informs whether the heuristic solution was accepted or rejected.\n\nValues\n\nPossible values are:\n\nHEURISTIC_SOLUTION_ACCEPTED: The heuristic solution was accepted\nHEURISTIC_SOLUTION_REJECTED: The heuristic solution was rejected\nHEURISTIC_SOLUTION_UNKNOWN: No information available on the acceptance\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/callbacks/#MathOptInterface.HEURISTIC_SOLUTION_ACCEPTED","page":"Callbacks","title":"MathOptInterface.HEURISTIC_SOLUTION_ACCEPTED","text":"HEURISTIC_SOLUTION_ACCEPTED::HeuristicSolutionStatus\n\nAn instance of the HeuristicSolutionStatus enum.\n\nHEURISTIC_SOLUTION_ACCEPTED: The heuristic solution was accepted\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/callbacks/#MathOptInterface.HEURISTIC_SOLUTION_REJECTED","page":"Callbacks","title":"MathOptInterface.HEURISTIC_SOLUTION_REJECTED","text":"HEURISTIC_SOLUTION_REJECTED::HeuristicSolutionStatus\n\nAn instance of the HeuristicSolutionStatus enum.\n\nHEURISTIC_SOLUTION_REJECTED: The heuristic solution was rejected\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/callbacks/#MathOptInterface.HEURISTIC_SOLUTION_UNKNOWN","page":"Callbacks","title":"MathOptInterface.HEURISTIC_SOLUTION_UNKNOWN","text":"HEURISTIC_SOLUTION_UNKNOWN::HeuristicSolutionStatus\n\nAn instance of the HeuristicSolutionStatus enum.\n\nHEURISTIC_SOLUTION_UNKNOWN: No information available on the acceptance\n\n\n\n\n\n","category":"constant"},{"location":"packages/SDDP/","page":"odow/SDDP.jl","title":"odow/SDDP.jl","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/v1.9.0/README.md\"","category":"page"},{"location":"packages/SDDP/","page":"odow/SDDP.jl","title":"odow/SDDP.jl","text":"\"logo\"","category":"page"},{"location":"packages/SDDP/#SDDP.jl","page":"odow/SDDP.jl","title":"SDDP.jl","text":"","category":"section"},{"location":"packages/SDDP/","page":"odow/SDDP.jl","title":"odow/SDDP.jl","text":"(Image: Build Status) (Image: 
codecov)","category":"page"},{"location":"packages/SDDP/","page":"odow/SDDP.jl","title":"odow/SDDP.jl","text":"SDDP.jl is a JuMP extension for solving large convex multistage stochastic programming problems using stochastic dual dynamic programming.","category":"page"},{"location":"packages/SDDP/#License","page":"odow/SDDP.jl","title":"License","text":"","category":"section"},{"location":"packages/SDDP/","page":"odow/SDDP.jl","title":"odow/SDDP.jl","text":"SDDP.jl is licensed under the MPL 2.0 license.","category":"page"},{"location":"packages/SDDP/#Documentation","page":"odow/SDDP.jl","title":"Documentation","text":"","category":"section"},{"location":"packages/SDDP/","page":"odow/SDDP.jl","title":"odow/SDDP.jl","text":"You can find the documentation at sddp.dev.","category":"page"},{"location":"packages/SDDP/#Help","page":"odow/SDDP.jl","title":"Help","text":"","category":"section"},{"location":"packages/SDDP/","page":"odow/SDDP.jl","title":"odow/SDDP.jl","text":"If you need help, please open a GitHub issue.","category":"page"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"EditURL = \"https://github.com/plasmo-dev/Plasmo.jl/blob/v0.6.4/README.md\"","category":"page"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"","category":"page"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"(Image: CI) (Image: codecov) (Image: ) (Image: ) (Image: DOI) (Image: Code Style: Blue)","category":"page"},{"location":"packages/Plasmo/#Plasmo.jl","page":"plasmo-dev/Plasmo.jl","title":"Plasmo.jl","text":"","category":"section"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"(Platform for Scalable Modeling and Optimization)","category":"page"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"[!NOTE] Plasmo.jl has undergone significant refactorization with the release of version 0.6. While most syntax should still work, we advise checking out the documentation for the latest updates and filing an issue if a v0.5 model produces errors.","category":"page"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"Plasmo.jl is a graph-based algebraic modeling framework for building, managing, and solving optimization problems that utilizes graph-theoretic concepts and modular data structures. The package extends JuMP.jl to offer concise syntax, interfaces with MathOptInterface.jl to access standard optimization solvers, and utilizes Graphs.jl to provide graph analysis and processing methods. Plasmo.jl facilitates developing optimization models for networked systems such as supply chains, power systems, industrial processes, or any coupled system that involves multiple components and connections. The package also acts as a high-level platform to develop customized optimization-based decomposition techniques and meta-algorithms to optimize problems over large systems.","category":"page"},{"location":"packages/Plasmo/#Overview","page":"plasmo-dev/Plasmo.jl","title":"Overview","text":"","category":"section"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"The core object in Plasmo.jl is the OptiGraph, a graph data structure that represents optimization problems as a set of optinodes and optiedges. 
Optinodes encapsulate variables, expressions, and constraints (and objective functions) as modular models and edges encapsulate linking constraints that couple variables across optinodes. Optigraphs can be embedded within other optigraphs to induce nested hierarchical structures, or they can be partitioned using different graph projections and partitioning algorithms to create new decomposition structures.","category":"page"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"The core data structure in Plasmo.jl is the OptiGraph. The optigraph contains a set of optinodes which represent self-contained optimization problems and optiedges that represent coupling between optinodes (which produces an underlying hypergraph structure of optinodes and optiedges). Optigraphs can further be embedded within other optigraphs to create nested hierarchical graph structures. The graph structures obtained using Plasmo.jl can be used for simple model and data management, but they can also be used to perform graph partitioning or develop interfaces to structured optimization solvers.","category":"page"},{"location":"packages/Plasmo/#License","page":"plasmo-dev/Plasmo.jl","title":"License","text":"","category":"section"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"Plasmo.jl is licensed under the MPL 2.0 license.","category":"page"},{"location":"packages/Plasmo/#Installation","page":"plasmo-dev/Plasmo.jl","title":"Installation","text":"","category":"section"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"Install Plasmo using Pkg.add:","category":"page"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"import Pkg\nPkg.add(\"Plasmo\")","category":"page"},{"location":"packages/Plasmo/#Documentation","page":"plasmo-dev/Plasmo.jl","title":"Documentation","text":"","category":"section"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"The current documentation is available through GitHub Pages. 
Additional examples can be found in the examples folder.","category":"page"},{"location":"packages/Plasmo/#Simple-Example","page":"plasmo-dev/Plasmo.jl","title":"Simple Example","text":"","category":"section"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"using Plasmo\nusing Ipopt\n\n#create an optigraph\ngraph = OptiGraph()\n\n#add nodes to an optigraph\n@optinode(graph, n1)\n@optinode(graph, n2)\n\n#add variables and constraints to nodes\n@variable(n1, 0 <= x <= 2)\n@variable(n1, 0 <= y <= 3)\n@constraint(n1, x+y <= 4)\n\n@variable(n2,x)\n@constraint(n2, exp(x) >= 2)\n\n#add linking constraints that couple nodes\n@linkconstraint(graph, n1[:x] == n2[:x])\n\n# set an optigraph objective\n@objective(graph, Min, n1[:x] + n2[:x])\n\n#optimize with Ipopt\nset_optimizer(graph, Ipopt.Optimizer)\noptimize!(graph)\n\n#Print solution values\nprintln(\"n1[:x] = \", value(n1[:x]))\nprintln(\"n2[:x] = \", value(n2[:x]))","category":"page"},{"location":"packages/Plasmo/#Acknowledgments","page":"plasmo-dev/Plasmo.jl","title":"Acknowledgments","text":"","category":"section"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"This code is based on work supported by the following funding agencies:","category":"page"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"U.S. Department of Energy (DOE), Office of Science, under Contract No. DE-AC02-06CH11357\nDOE Office of Electricity Delivery and Energy Reliability’s Advanced Grid Research and Development program at Argonne National Laboratory\nNational Science Foundation under award NSF-EECS-1609183 and under award CBET-1748516","category":"page"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"The primary developer is Jordan Jalving (@jalving) with support from the following contributors. ","category":"page"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"Victor Zavala (University of Wisconsin-Madison)\nYankai Cao (University of British Columbia)\nKibaek Kim (Argonne National Laboratory)\nSungho Shin (University of Wisconsin-Madison)","category":"page"},{"location":"packages/Plasmo/#Citing-Plasmo.jl","page":"plasmo-dev/Plasmo.jl","title":"Citing Plasmo.jl","text":"","category":"section"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"If you find Plasmo.jl useful for your work, you may cite the manuscript as:","category":"page"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"@article{Jalving2022,\n title={A Graph-Based Modeling Abstraction for Optimization: Concepts and Implementation in Plasmo.jl},\n author={Jordan Jalving and Sungho Shin and Victor M. 
Zavala},\n journal={Mathematical Programming Computation},\n year={2022},\n volume={14},\n pages={699 - 747},\n doi={10.1007/s12532-022-00223-3}\n}","category":"page"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"You can also access a freely available pre-print.","category":"page"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"There is also an earlier manuscript where we presented the initial ideas behind Plasmo.jl which you can find here:","category":"page"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"@article{JalvingCaoZavala2019,\nauthor = {Jalving, Jordan and Cao, Yankai and Zavala, Victor M},\njournal = {Computers {\\&} Chemical Engineering},\npages = {134--154},\ntitle = {Graph-based modeling and simulation of complex systems},\nvolume = {125},\nyear = {2019},\ndoi = {10.1016/j.compchemeng.2019.03.009}\n}","category":"page"},{"location":"packages/Plasmo/","page":"plasmo-dev/Plasmo.jl","title":"plasmo-dev/Plasmo.jl","text":"A pre-print of this paper can be found here","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"EditURL = \"https://github.com/jump-dev/MiniZinc.jl/blob/v0.3.12/README.md\"","category":"page"},{"location":"packages/MiniZinc/#MiniZinc.jl","page":"jump-dev/MiniZinc.jl","title":"MiniZinc.jl","text":"","category":"section"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"MiniZinc.jl is a wrapper for the MiniZinc constraint modeling language.","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"It provides a way to write MathOptInterface models to .mzn files, and a way to interact with libminizinc.","category":"page"},{"location":"packages/MiniZinc/#Affiliation","page":"jump-dev/MiniZinc.jl","title":"Affiliation","text":"","category":"section"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"This wrapper is maintained by the JuMP community and is not part of the MiniZinc project.","category":"page"},{"location":"packages/MiniZinc/#Getting-help","page":"jump-dev/MiniZinc.jl","title":"Getting help","text":"","category":"section"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"If you need help, please ask a question on the JuMP community forum.","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"If you have a reproducible example of a bug, please open a GitHub issue.","category":"page"},{"location":"packages/MiniZinc/#License","page":"jump-dev/MiniZinc.jl","title":"License","text":"","category":"section"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"MiniZinc.jl is licensed under the MIT License.","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"The underlying project, MiniZinc/libminizinc, is licensed under the MPL 2.0 license.","category":"page"},{"location":"packages/MiniZinc/#Install","page":"jump-dev/MiniZinc.jl","title":"Install","text":"","category":"section"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"Install MiniZinc.jl using the Julia package 
manager:","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"import Pkg\nPkg.add(\"MiniZinc\")","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"Windows","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"On Linux and macOS, this package automatically installs libminizinc. However, we're still working out problems with the install on Windows. To use MiniZinc.jl, you'll need to manually install a copy of libminizinc from minizinc.org or compile one yourself from MiniZinc/libminizinc.","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"To teach MiniZinc.jl where to look for libminizinc, set the JULIA_LIBMINIZINC_DIR environment variable. For example:","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"ENV[\"JULIA_LIBMINIZINC_DIR\"] = \"C:\\\\Program Files\\\\MiniZinc\"","category":"page"},{"location":"packages/MiniZinc/#Use-with-MathOptInterface","page":"jump-dev/MiniZinc.jl","title":"Use with MathOptInterface","text":"","category":"section"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"MiniZinc.jl supports the constraint programming sets defined in MathOptInterface, as well as (in)equality constraints.","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"The following example solves the following constraint program:","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"xᵢ ∈ {1, 2, 3} ∀i=1,2,3\nzⱼ ∈ {0, 1} ∀j=1,2\nz₁ <-> x₁ != x₂\nz₂ <-> x₂ != x₃\nz₁ + z₂ = 1","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"julia> import MiniZinc\n\njulia> import MathOptInterface as MOI\n\njulia> function main()\n model = MOI.Utilities.CachingOptimizer(\n MiniZinc.Model{Int}(),\n MiniZinc.Optimizer{Int}(\"chuffed\"),\n )\n # xᵢ ∈ {1, 2, 3} ∀i=1,2,3\n x = MOI.add_variables(model, 3)\n MOI.add_constraint.(model, x, MOI.Interval(1, 3))\n MOI.add_constraint.(model, x, MOI.Integer())\n # zⱼ ∈ {0, 1} ∀j=1,2\n z = MOI.add_variables(model, 2)\n MOI.add_constraint.(model, z, MOI.ZeroOne())\n # z₁ <-> x₁ != x₂\n MOI.add_constraint(\n model,\n MOI.VectorOfVariables([z[1], x[1], x[2]]),\n MOI.Reified(MOI.AllDifferent(2)),\n )\n # z₂ <-> x₂ != x₃\n MOI.add_constraint(\n model,\n MOI.VectorOfVariables([z[2], x[2], x[3]]),\n MOI.Reified(MOI.AllDifferent(2)),\n )\n # z₁ + z₂ = 1\n MOI.add_constraint(model, 1 * z[1] + x[2], MOI.EqualTo(1))\n MOI.optimize!(model)\n x_star = MOI.get(model, MOI.VariablePrimal(), x)\n z_star = MOI.get(model, MOI.VariablePrimal(), z)\n return x_star, z_star\n end\nmain (generic function with 1 method)\n\njulia> main()\n([1, 1, 3], [0, 1])","category":"page"},{"location":"packages/MiniZinc/#Use-with-JuMP","page":"jump-dev/MiniZinc.jl","title":"Use with JuMP","text":"","category":"section"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"You can also call MiniZinc from JuMP, using any solver that libminizinc supports. 
By default, MiniZinc.jl is compiled with the HiGHS MILP solver, which can be selected by passing the \"highs\" parameter to MiniZinc.Optimizer:","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"using JuMP\nimport MiniZinc\nmodel = Model(() -> MiniZinc.Optimizer{Float64}(\"highs\"))\n@variable(model, 1 <= x[1:3] <= 3, Int)\n@constraint(model, x in MOI.AllDifferent(3))\n@objective(model, Max, sum(i * x[i] for i in 1:3))\noptimize!(model)\n@show value.(x)","category":"page"},{"location":"packages/MiniZinc/#MathOptInterface-API","page":"jump-dev/MiniZinc.jl","title":"MathOptInterface API","text":"","category":"section"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"The MiniZinc Optimizer{T} supports the following constraints and attributes.","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"List of supported objective functions:","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"MOI.ObjectiveFunction{MOI.ScalarAffineFunction{T}}\nMOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{T}}\nMOI.ObjectiveFunction{MOI.VariableIndex}","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"List of supported variable types:","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"MOI.Reals","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"List of supported constraint types:","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"MOI.ScalarAffineFunction{T} in MOI.EqualTo{T}\nMOI.ScalarAffineFunction{T} in MOI.GreaterThan{T}\nMOI.ScalarAffineFunction{T} in MOI.Integer\nMOI.ScalarAffineFunction{T} in MOI.Interval{T}\nMOI.ScalarAffineFunction{T} in MOI.LessThan{T}\nMOI.ScalarAffineFunction{T} in MOI.ZeroOne\nMOI.VariableIndex in MOI.EqualTo{T}\nMOI.VariableIndex in MOI.GreaterThan{T}\nMOI.VariableIndex in MOI.Integer\nMOI.VariableIndex in MOI.Interval{T}\nMOI.VariableIndex in MOI.LessThan{T}\nMOI.VariableIndex in MOI.Parameter{T}\nMOI.VariableIndex in MOI.Semicontinuous{T}\nMOI.VariableIndex in MOI.Semiinteger{T}\nMOI.VariableIndex in MOI.ZeroOne\nMOI.VectorOfVariables in MOI.AllDifferent\nMOI.VectorOfVariables in MOI.BinPacking{T}\nMOI.VectorOfVariables in MOI.Circuit\nMOI.VectorOfVariables in MOI.CountAtLeast\nMOI.VectorOfVariables in MOI.CountBelongs\nMOI.VectorOfVariables in MOI.CountDistinct\nMOI.VectorOfVariables in MOI.CountGreaterThan\nMOI.VectorOfVariables in MOI.Cumulative\nMOI.VectorOfVariables in MOI.Path\nMOI.VectorOfVariables in MOI.Table{T}","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"List of supported model attributes:","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"MOI.NLPBlock()\nMOI.Name()\nMOI.ObjectiveSense()","category":"page"},{"location":"packages/MiniZinc/#Options","page":"jump-dev/MiniZinc.jl","title":"Options","text":"","category":"section"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"Set options using MOI.RawOptimizerAttribute in MOI or 
set_attribute in JuMP.","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"MiniZinc.jl supports the following options:","category":"page"},{"location":"packages/MiniZinc/","page":"jump-dev/MiniZinc.jl","title":"jump-dev/MiniZinc.jl","text":"model_filename::String = \"\": the location at which to write out the .mzn file during optimization. This option can be helpful during debugging. If left empty, a temporary file will be used instead.\nMOI.SolutionLimit: set this option to a positive integer to return up to the limit number of solutions.","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"EditURL = \"transp.jl\"","category":"page"},{"location":"tutorials/linear/transp/#The-transportation-problem","page":"The transportation problem","title":"The transportation problem","text":"","category":"section"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"This tutorial was originally contributed by Louis Luangkesorn.","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"This tutorial is an adaptation of the transportation problem described in AMPL: A Modeling Language for Mathematical Programming, by R. Fourer, D.M. Gay and B.W. Kernighan.","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"The purpose of this tutorial is to demonstrate how to create a JuMP model from an ad-hoc structured text file.","category":"page"},{"location":"tutorials/linear/transp/#Required-packages","page":"The transportation problem","title":"Required packages","text":"","category":"section"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"This tutorial uses the following packages:","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"using JuMP\nimport DelimitedFiles\nimport HiGHS","category":"page"},{"location":"tutorials/linear/transp/#Formulation","page":"The transportation problem","title":"Formulation","text":"","category":"section"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"Suppose that we have a set of factories that produce pogo sticks, and a set of retail stores in which to sell them. 
Each factory has a maximum number of pogo sticks that it can produce, and each retail store has a demand of pogo sticks that it can sell.","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"In the transportation problem, we want to choose the number of pogo sticks to make and ship from each factory to each retail store that minimizes the total shipping cost.","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"Mathematically, we represent our set of factories by a set of origins i in O and our retail stores by a set of destinations j in D. The maximum supply at each factory is s_i and the demand from each retail store is d_j. The cost of shipping one pogo stick from i to j is c_ij.","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"With a little effort, we can model the transportation problem as the following linear program:","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"beginaligned\nmin sum_i in O j in D c_ij x_ij \nst sum_j in D x_i j le s_i forall i in O \n sum_i in O x_i j = d_j forall j in D \n x_i j ge 0 forall i in O j in D\nendaligned","category":"page"},{"location":"tutorials/linear/transp/#Data","page":"The transportation problem","title":"Data","text":"","category":"section"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"We assume our data is in the form of a text file that has the following form. In practice, we would obtain this text file from the user as input, but for the purpose of this tutorial we're going to create it from Julia.","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"open(joinpath(@__DIR__, \"transp.txt\"), \"w\") do io\n print(\n io,\n \"\"\"\n . FRA DET LAN WIN STL FRE LAF SUPPLY\n GARY 39 14 11 14 16 82 8 1400\n CLEV 27 . 12 . 26 95 17 2600\n PITT 24 14 17 13 28 99 20 2900\n DEMAND 900 1200 600 400 1700 1100 1000 0\n \"\"\",\n )\n return\nend","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"Here the rows are the origins, the columns are the destinations, and the values are the cost of shipping one pogo stick from the origin to the destination. If pogo stick cannot be transported from a source to a destination, then the value is .. The final row and final column are the demand and supply of each location respectively.","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"We didn't account for arcs which do not exist in our formulation, but we can make a small change and fix x_ij = 0 if c_ij = .","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"Our first step is to convert this text format into an appropriate Julia datastructure that we can work with. 
Since our data is tabular with named rows and columns, one option is JuMP's Containers.DenseAxisArray object:","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"function read_data(filename::String)\n data = DelimitedFiles.readdlm(filename)\n rows, columns = data[2:end, 1], data[1, 2:end]\n return Containers.DenseAxisArray(data[2:end, 2:end], rows, columns)\nend\n\ndata = read_data(joinpath(@__DIR__, \"transp.txt\"))","category":"page"},{"location":"tutorials/linear/transp/#JuMP-formulation","page":"The transportation problem","title":"JuMP formulation","text":"","category":"section"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"Following Design patterns for larger models, we code our JuMP model as a function which takes in an input. In this example, we print the output to stdout:","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"function solve_transportation_problem(data::Containers.DenseAxisArray)\n # Get the set of supplies and demands\n O, D = axes(data)\n # Drop the SUPPLY and DEMAND nodes from our sets\n O, D = setdiff(O, [\"DEMAND\"]), setdiff(D, [\"SUPPLY\"])\n model = Model(HiGHS.Optimizer)\n set_silent(model)\n @variable(model, x[o in O, d in D] >= 0)\n # Remove arcs with \".\" cost by fixing them to 0.0.\n for o in O, d in D\n if data[o, d] == \".\"\n fix(x[o, d], 0.0; force = true)\n end\n end\n @objective(\n model,\n Min,\n sum(data[o, d] * x[o, d] for o in O, d in D if data[o, d] != \".\"),\n )\n @constraint(model, [o in O], sum(x[o, :]) <= data[o, \"SUPPLY\"])\n @constraint(model, [d in D], sum(x[:, d]) == data[\"DEMAND\", d])\n optimize!(model)\n @assert is_solved_and_feasible(model)\n # Pretty print the solution in the format of the input\n print(\" \", join(lpad.(D, 7, ' ')))\n for o in O\n print(\"\\n\", o)\n for d in D\n if isapprox(value(x[o, d]), 0.0; atol = 1e-6)\n print(\" .\")\n else\n print(\" \", lpad(value(x[o, d]), 6, ' '))\n end\n end\n end\n return\nend","category":"page"},{"location":"tutorials/linear/transp/#Solution","page":"The transportation problem","title":"Solution","text":"","category":"section"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"Let's solve and view the solution:","category":"page"},{"location":"tutorials/linear/transp/","page":"The transportation problem","title":"The transportation problem","text":"solve_transportation_problem(data)","category":"page"},{"location":"moi/developer/checklists/","page":"Checklists","title":"Checklists","text":"EditURL = \"https://github.com/jump-dev/MathOptInterface.jl/blob/v1.34.0/docs/src/developer/checklists.md\"","category":"page"},{"location":"moi/developer/checklists/#Checklists","page":"Checklists","title":"Checklists","text":"","category":"section"},{"location":"moi/developer/checklists/","page":"Checklists","title":"Checklists","text":"The purpose of this page is to collate a series of checklists for commonly performed changes to the source code of MathOptInterface.","category":"page"},{"location":"moi/developer/checklists/","page":"Checklists","title":"Checklists","text":"In each case, copy the checklist into the description of the pull request.","category":"page"},{"location":"moi/developer/checklists/#Making-a-release","page":"Checklists","title":"Making a 
release","text":"","category":"section"},{"location":"moi/developer/checklists/","page":"Checklists","title":"Checklists","text":"Use this checklist when making a release of the MathOptInterface repository.","category":"page"},{"location":"moi/developer/checklists/","page":"Checklists","title":"Checklists","text":"## Basic\n\n - [ ] `version` field of `Project.toml` has been updated\n - If a breaking change, increment the MAJOR field and reset others to 0\n - If adding new features, increment the MINOR field and reset PATCH to 0\n - If adding bug fixes or documentation changes, increment the PATCH field\n\n## Documentation\n\n - [ ] Add a new entry to `docs/src/changelog.md`, following existing style\n\n## Tests\n\n - [ ] The `solver-tests.yml` GitHub action does not have unexpected failures.\n To run the action, go to:\n https://github.com/jump-dev/MathOptInterface.jl/actions/workflows/solver-tests.yml\n and click \"Run workflow\"","category":"page"},{"location":"moi/developer/checklists/#Adding-a-new-set","page":"Checklists","title":"Adding a new set","text":"","category":"section"},{"location":"moi/developer/checklists/","page":"Checklists","title":"Checklists","text":"Use this checklist when adding a new set to the MathOptInterface repository.","category":"page"},{"location":"moi/developer/checklists/","page":"Checklists","title":"Checklists","text":"## Basic\n\n - [ ] Add a new `AbstractScalarSet` or `AbstractVectorSet` to `src/sets.jl`\n - [ ] If `isbitstype(S) == false`, implement `Base.copy(set::S)`\n - [ ] If `isbitstype(S) == false`, implement `Base.:(==)(x::S, y::S)`\n - [ ] If an `AbstractVectorSet`, implement `dimension(set::S)`, unless the\n dimension is given by `set.dimension`.\n\n## Utilities\n\n - [ ] If an `AbstractVectorSet`, implement `Utilities.set_dot`,\n unless the dot product between two vectors in the set is equivalent to\n `LinearAlgebra.dot`\n - [ ] If an `AbstractVectorSet`, implement `Utilities.set_with_dimension` in\n `src/Utilities/matrix_of_constraints.jl`\n - [ ] Add the set to the `@model` macro at the bottom of `src/Utilities.model.jl`\n\n## Documentation\n\n - [ ] Add a docstring, which gives the mathematical definition of the set,\n along with an `## Example` block containing a `jldoctest`\n - [ ] Add the docstring to `docs/src/reference/standard_form.md`\n - [ ] Add the set to the relevant table in `docs/src/manual/standard_form.md`\n\n## Tests\n\n - [ ] Define a new `_set(::Type{S})` method in `src/Test/test_basic_constraint.jl`\n and add the name of the set to the list at the bottom of that files\n - [ ] If the set has any checks in its constructor, add tests to `test/sets.jl`\n\n## MathOptFormat\n\n - [ ] Open an issue at `https://github.com/jump-dev/MathOptFormat` to add\n support for the new set {{ replace with link to the issue }}\n\n## Optional\n\n - [ ] Implement `dual_set(::S)` and `dual_set_type(::Type{S})`\n - [ ] Add new tests to the `Test` submodule exercising your new set\n - [ ] Add new bridges to convert your set into more commonly used sets","category":"page"},{"location":"moi/developer/checklists/#Adding-a-new-bridge","page":"Checklists","title":"Adding a new bridge","text":"","category":"section"},{"location":"moi/developer/checklists/","page":"Checklists","title":"Checklists","text":"Use this checklist when adding a new bridge to the MathOptInterface repository.","category":"page"},{"location":"moi/developer/checklists/","page":"Checklists","title":"Checklists","text":"The steps are mostly the same, but locations depend on whether 
the bridge is a Constraint, Objective, or Variable bridge. In each case below, replace XXX with the appropriate type of bridge.","category":"page"},{"location":"moi/developer/checklists/","page":"Checklists","title":"Checklists","text":"## Basic\n\n - [ ] Create a new file in `src/Bridges/XXX/bridges`\n - [ ] Define the bridge, following existing examples. The name of the bridge\n struct must end in `Bridge`\n - [ ] Check if your bridge can be a subtype of [`MOI.Bridges.Constraint.SetMapBridge`](@ref)\n - [ ] Define a new `const` that is a `SingleBridgeOptimizer` wrapping the\n new bridge. The name of the const must be the name of the bridge, less\n the `Bridge` suffix\n - [ ] `include` the file in `src/Bridges/XXX/bridges/XXX.jl`\n - [ ] If the bridge should be enabled by default, add the bridge to\n `add_all_bridges` at the bottom of `src/Bridges/XXX/XXX.jl`\n\n## Tests\n\n - [ ] Create a new file in the appropriate subdirectory of `tests/Bridges/XXX`\n - [ ] Use `MOI.Bridges.runtests` to test various inputs and outputs of the\n bridge\n - [ ] If, after opening the pull request to add the bridge, some lines are not\n covered by the tests, add additional bridge-specific tests to cover the\n untested lines.\n\n## Documentation\n\n - [ ] Add a docstring which uses the same template as existing bridges.\n\n## Final touch\n\nIf the bridge depends on run-time values of other variables and constraints in\nthe model:\n\n - [ ] Implement `MOI.Utilities.needs_final_touch(::Bridge)`\n - [ ] Implement `MOI.Utilities.final_touch(::Bridge, ::MOI.ModelLike)`\n - [ ] Ensure that `final_touch` can be called multiple times in a row","category":"page"},{"location":"moi/developer/checklists/#Updating-MathOptFormat","page":"Checklists","title":"Updating MathOptFormat","text":"","category":"section"},{"location":"moi/developer/checklists/","page":"Checklists","title":"Checklists","text":"Use this checklist when updating the version of MathOptFormat.","category":"page"},{"location":"moi/developer/checklists/","page":"Checklists","title":"Checklists","text":"## Basic\n\n - [ ] The file at `src/FileFormats/MOF/mof.schema.json` is updated\n - [ ] The constant `_SUPPORTED_VERSIONS` is updated in\n `src/FileFormats/MOF/MOF.jl`\n\n## New sets\n\n - [ ] New sets are added to the `@model` in `src/FileFormats/MOF/MOF.jl`\n - [ ] New sets are added to the `@enum` in `src/FileFormats/MOF/read.jl`\n - [ ] `set_to_moi` is defined for each set in `src/FileFormats/MOF/read.jl`\n - [ ] `head_name` is defined for each set in `src/FileFormats/MOF/write.jl`\n - [ ] A new unit test calling `_test_model_equality` is aded to\n `test/FileFormats/MOF/MOF.jl`\n\n## Tests\n\n - [ ] The version field in `test/FileFormats/MOF/nlp.mof.json` is updated\n\n## Documentation\n\n - [ ] The version fields are updated in `docs/src/submodules/FileFormats/overview.md`","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"EditURL = \"https://github.com/jump-dev/SDPLR.jl/blob/v0.1.0/README.md\"","category":"page"},{"location":"packages/SDPLR/#SDPLR","page":"jump-dev/SDPLR.jl","title":"SDPLR","text":"","category":"section"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"(Image: Build Status) (Image: codecov)","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"SDPLR.jl is a wrapper for the SDPLR semidefinite programming 
solver.","category":"page"},{"location":"packages/SDPLR/#License","page":"jump-dev/SDPLR.jl","title":"License","text":"","category":"section"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"SDPLR.jl is licensed under the MIT License.","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"The underlying solver, SDPLR, is licensed under the GPL v2 license.","category":"page"},{"location":"packages/SDPLR/#Installation","page":"jump-dev/SDPLR.jl","title":"Installation","text":"","category":"section"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"Install SDPLR.jl using Pkg.add:","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"import Pkg\nPkg.add(\"SDPLR\")","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"In addition to installing the SDPLR.jl package, this will also download and install the SDPLR binaries. You do not need to install SDPLR separately.","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"To use a custom binary, read the Custom solver binaries section of the JuMP documentation.","category":"page"},{"location":"packages/SDPLR/#Use-with-JuMP","page":"jump-dev/SDPLR.jl","title":"Use with JuMP","text":"","category":"section"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"To use SDPLR with JuMP, use SDPLR.Optimizer:","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"using JuMP, SDPLR\nmodel = Model(SDPLR.Optimizer)","category":"page"},{"location":"packages/SDPLR/#MathOptInterface-API","page":"jump-dev/SDPLR.jl","title":"MathOptInterface API","text":"","category":"section"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"The SDPLR optimizer supports the following constraints and attributes.","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"List of supported objective functions:","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"List of supported variable types:","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"MOI.Nonnegatives\nMOI.PositiveSemidefiniteConeTriangle","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"List of supported constraint types:","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"MOI.ScalarAffineFunction{Float64} in MOI.EqualTo{Float64}","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"List of supported model 
attributes:","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"MOI.ObjectiveSense()","category":"page"},{"location":"packages/SDPLR/#Attributes","page":"jump-dev/SDPLR.jl","title":"Attributes","text":"","category":"section"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"The algorithm is parametrized by the attributes that can be used both with JuMP.set_attributes and JuMP.get_attributes and have the following types and default values:","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"rho_f::Cdouble = 1.0e-5\nrho_c::Cdouble = 1.0e-1\nsigmafac::Cdouble = 2.0\nrankreduce::Csize_t = 0\ntimelim::Csize_t = 3600\nprintlevel::Csize_t = 1\ndthresh_dim::Csize_t = 10\ndthresh_dens::Cdouble = 0.75\nnumbfgsvecs::Csize_t = 4\nrankredtol::Cdouble = 2.2204460492503131e-16\ngaptol::Cdouble = 1.0e-3\ncheckbd::Cptrdiff_t = -1\ntypebd::Cptrdiff_t = 1\nmaxrank::Function = default_maxrank","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"The following attributes can be also be used both with JuMP.set_attributes and JuMP.get_attributes, but they are also modified by optimize!:","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"majiter\niter\nlambdaupdate\ntotaltime\nsigma","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"When they are set, it provides the initial value of the algorithm. With get, they provide the value at the end of the algorithm. totaltime is the total time in second. For the other attributes, their meaning is best described by the following pseudo-code.","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"Given values of R, lambda and sigma, let vio = [dot(A[i], R * R') - b[i]) for i in 1:m] (vio[0] is dot(C, R * R') in the C implementation, but we ignore this entry here), val = dot(C, R * R') - dot(vio, lambda) + sigma/2 * norm(vio)^2, y = -lambda - sigma * vio, S = C + sum(A[i] * y[i] for i in 1:m) and the gradient is G = 2S * R. 
Note that the norm(G) used in SDPLR when comparing against rho_c has a 2-scaling difference from the norm(S * R) used in the paper.","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"The SDPLR solver implements the following algorithm.","category":"page"},{"location":"packages/SDPLR/","page":"jump-dev/SDPLR.jl","title":"jump-dev/SDPLR.jl","text":"sigma = inv(sum(size(A[i], 1) for i in 1:m))\norigval = val\nwhile majiter++ < 100_000\n lambdaupdate = 0\n localiter = 100\n while localiter > 10\n lambdaupdate += 1\n localiter = 0\n if norm(G) / (norm(C) + 1) <= rho_c / sigma\n break\n end\n while norm(G) / (norm(C) + 1) - rho_c / sigma > eps()\n localiter += 1\n iter += 1\n D = lbfgs(G)\n R += linesearch(D) * D\n if norm(vio) / (norm(b) + 1) <= rho_f || totaltime >= timelim || iter >= 10_000_000\n return\n end\n end\n lambda -= sigma * vio\n end\n if val - 1e10 * abs(origval) > eps()\n return\n end\n if norm(vio) / (norm(b) + 1) <= rho_f || totaltime >= timelim || iter >= 10_000_000\n return\n end\n while norm(G) / (norm(C) + 1) > rho_c / sigma\n sigma *= 2\n end\n lambdaupdate = 0\nend","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"DocTestSetup = quote\n using JuMP\nend","category":"page"},{"location":"manual/expressions/#Expressions","page":"Expressions","title":"Expressions","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"JuMP has three types of expressions: affine, quadratic, and nonlinear. These expressions can be inserted into constraints or into the objective. This is particularly useful if an expression is used in multiple places in the model.","category":"page"},{"location":"manual/expressions/#Affine-expressions","page":"Expressions","title":"Affine expressions","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"There are four ways of constructing an affine expression in JuMP: with the @expression macro, with operator overloading, with the AffExpr constructor, and with add_to_expression!.","category":"page"},{"location":"manual/expressions/#Macros","page":"Expressions","title":"Macros","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"The recommended way to create an affine expression is via the @expression macro.","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @variable(model, y)\ny\n\njulia> ex = @expression(model, 2x + y - 1)\n2 x + y - 1","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"This expression can be used in the objective or added to a constraint. For example:","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> @objective(model, Min, 2 * ex - 1)\n4 x + 2 y - 3\n\njulia> objective_function(model)\n4 x + 2 y - 3","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"Just like variables and constraints, named expressions can also be created. 
For example","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> model = Model();\n\njulia> @variable(model, x[i = 1:3]);\n\njulia> @expression(model, expr[i = 1:3], i * sum(x[j] for j in i:3));\n\njulia> expr\n3-element Vector{AffExpr}:\n x[1] + x[2] + x[3]\n 2 x[2] + 2 x[3]\n 3 x[3]","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"tip: Tip\nYou can read more about containers in the Containers section.","category":"page"},{"location":"manual/expressions/#Operator-overloading","page":"Expressions","title":"Operator overloading","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"Expressions can also be created without macros. However, note that in some cases, this can be much slower that constructing an expression using macros.","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @variable(model, y)\ny\n\njulia> ex = 2x + y - 1\n2 x + y - 1","category":"page"},{"location":"manual/expressions/#Constructors","page":"Expressions","title":"Constructors","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"A third way to create an affine expression is by the AffExpr constructor. The first argument is the constant term, and the remaining arguments are variable-coefficient pairs.","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @variable(model, y)\ny\n\njulia> ex = AffExpr(-1.0, x => 2.0, y => 1.0)\n2 x + y - 1","category":"page"},{"location":"manual/expressions/#add_to_expression!","page":"Expressions","title":"add_to_expression!","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"The fourth way to create an affine expression is by using add_to_expression!. Compared to the operator overloading method, this approach is faster because it avoids constructing temporary objects. The @expression macro uses add_to_expression! behind-the-scenes.","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @variable(model, y)\ny\n\njulia> ex = AffExpr(-1.0)\n-1\n\njulia> add_to_expression!(ex, 2.0, x)\n2 x - 1\n\njulia> add_to_expression!(ex, 1.0, y)\n2 x + y - 1","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"add_to_expression! 
can also be used to sum expressions in-place:","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> model = Model();\n\njulia> @variable(model, x[1:2])\n2-element Vector{VariableRef}:\n x[1]\n x[2]\n\njulia> @expression(model, ex1, sum(x))\nx[1] + x[2]\n\njulia> @expression(model, ex2, 2 * sum(x))\n2 x[1] + 2 x[2]\n\njulia> add_to_expression!(ex1, ex2)\n3 x[1] + 3 x[2]\n\njulia> ex1\n3 x[1] + 3 x[2]\n\njulia> ex2\n2 x[1] + 2 x[2]","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"warning: Warning\nRead the section Initializing arrays for some cases to be careful about when using add_to_expression!.","category":"page"},{"location":"manual/expressions/#Removing-zero-terms","page":"Expressions","title":"Removing zero terms","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"Use drop_zeros! to remove terms from an affine expression with a 0 coefficient.","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @expression(model, ex, x + 1 - x)\n0 x + 1\n\njulia> drop_zeros!(ex)\n\njulia> ex\n1","category":"page"},{"location":"manual/expressions/#Coefficients","page":"Expressions","title":"Coefficients","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"Use coefficient to return the coefficient associated with a variable in an affine expression.","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @variable(model, y)\ny\n\njulia> @expression(model, ex, 2x + 1)\n2 x + 1\n\njulia> coefficient(ex, x)\n2.0\n\njulia> coefficient(ex, y)\n0.0","category":"page"},{"location":"manual/expressions/#Quadratic-expressions","page":"Expressions","title":"Quadratic expressions","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"Like affine expressions, there are four ways of constructing a quadratic expression in JuMP: macros, operator overloading, constructors, and add_to_expression!.","category":"page"},{"location":"manual/expressions/#Macros-2","page":"Expressions","title":"Macros","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"The @expression macro can be used to create quadratic expressions by including quadratic terms.","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @variable(model, y)\ny\n\njulia> ex = @expression(model, x^2 + 2 * x * y + y^2 + x + y - 1)\nx² + 2 x*y + y² + x + y - 1","category":"page"},{"location":"manual/expressions/#Operator-overloading-2","page":"Expressions","title":"Operator overloading","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"Operator overloading can also be used to create quadratic expressions. 
The same performance warning (discussed in the affine expression section) applies.","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @variable(model, y)\ny\n\njulia> ex = x^2 + 2 * x * y + y^2 + x + y - 1\nx² + 2 x*y + y² + x + y - 1","category":"page"},{"location":"manual/expressions/#Constructors-2","page":"Expressions","title":"Constructors","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"Quadratic expressions can also be created using the QuadExpr constructor. The first argument is an affine expression, and the remaining arguments are pairs, where the first term is a JuMP.UnorderedPair and the second term is the coefficient.","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @variable(model, y)\ny\n\njulia> aff_expr = AffExpr(-1.0, x => 1.0, y => 1.0)\nx + y - 1\n\njulia> quad_expr = QuadExpr(\n aff_expr,\n UnorderedPair(x, x) => 1.0,\n UnorderedPair(x, y) => 2.0,\n UnorderedPair(y, y) => 1.0,\n )\nx² + 2 x*y + y² + x + y - 1","category":"page"},{"location":"manual/expressions/#add_to_expression!-2","page":"Expressions","title":"add_to_expression!","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"Finally, add_to_expression! can also be used to add quadratic terms.","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @variable(model, y)\ny\n\njulia> ex = QuadExpr(x + y - 1.0)\nx + y - 1\n\njulia> add_to_expression!(ex, 1.0, x, x)\nx² + x + y - 1\n\njulia> add_to_expression!(ex, 2.0, x, y)\nx² + 2 x*y + x + y - 1\n\njulia> add_to_expression!(ex, 1.0, y, y)\nx² + 2 x*y + y² + x + y - 1","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"warning: Warning\nRead the section Initializing arrays for some cases to be careful about when using add_to_expression!.","category":"page"},{"location":"manual/expressions/#Removing-zero-terms-2","page":"Expressions","title":"Removing zero terms","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"Use drop_zeros! 
to remove terms from a quadratic expression with a 0 coefficient.","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @expression(model, ex, x^2 + x + 1 - x^2)\n0 x² + x + 1\n\njulia> drop_zeros!(ex)\n\njulia> ex\nx + 1","category":"page"},{"location":"manual/expressions/#Coefficients-2","page":"Expressions","title":"Coefficients","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"Use coefficient to return the coefficient associated with a pair of variables in a quadratic expression.","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @variable(model, y)\ny\n\njulia> @expression(model, ex, 2*x*y + 3*x)\n2 x*y + 3 x\n\njulia> coefficient(ex, x, y)\n2.0\n\njulia> coefficient(ex, x, x)\n0.0\n\njulia> coefficient(ex, y, x)\n2.0\n\njulia> coefficient(ex, x)\n3.0","category":"page"},{"location":"manual/expressions/#Nonlinear-expressions","page":"Expressions","title":"Nonlinear expressions","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"Nonlinear expressions in JuMP are represented by a NonlinearExpr object. See Nonlinear expressions in detail for more details.","category":"page"},{"location":"manual/expressions/#Initializing-arrays","page":"Expressions","title":"Initializing arrays","text":"","category":"section"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"JuMP implements zero(AffExpr) and one(AffExpr) to support various functions in LinearAlgebra (for example, accessing the off-diagonal of a Diagonal matrix).","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> zero(AffExpr)\n0\n\njulia> one(AffExpr)\n1","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"However, this can result in a subtle bug if you call add_to_expression! 
or the MutableArithmetics API on an element created by zeros or ones:","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> x = zeros(AffExpr, 2)\n2-element Vector{AffExpr}:\n 0\n 0\n\njulia> add_to_expression!(x[1], 1.1)\n1.1\n\njulia> x\n2-element Vector{AffExpr}:\n 1.1\n 1.1","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"Notice how we modified x[1], but we also changed x[2]!","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"This happened because zeros(AffExpr, 2) calls zero(AffExpr) once to obtain a zero element, and then creates an appropriately sized array filled with the same element.","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"This also happens with broadcasting calls containing a conversion of 0 or 1:","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> x = Vector{AffExpr}(undef, 2)\n2-element Vector{AffExpr}:\n #undef\n #undef\n\njulia> x .= 0\n2-element Vector{AffExpr}:\n 0\n 0\n\njulia> add_to_expression!(x[1], 1.1)\n1.1\n\njulia> x\n2-element Vector{AffExpr}:\n 1.1\n 1.1","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"The recommended way to create an array of empty expressions is as follows:","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> x = Vector{AffExpr}(undef, 2)\n2-element Vector{AffExpr}:\n #undef\n #undef\n\njulia> for i in eachindex(x)\n x[i] = AffExpr(0.0)\n end\n\njulia> add_to_expression!(x[1], 1.1)\n1.1\n\njulia> x\n2-element Vector{AffExpr}:\n 1.1\n 0","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"Alternatively, use non-mutating operation to avoid updating x[1] in-place:","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"julia> x = zeros(AffExpr, 2)\n2-element Vector{AffExpr}:\n 0\n 0\n\njulia> x[1] += 1.1\n1.1\n\njulia> x\n2-element Vector{AffExpr}:\n 1.1\n 0","category":"page"},{"location":"manual/expressions/","page":"Expressions","title":"Expressions","text":"Note that for large expressions this will be slower due to the allocation of additional temporary objects.","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"EditURL = \"factory_schedule.jl\"","category":"page"},{"location":"tutorials/linear/factory_schedule/#The-factory-schedule-example","page":"The factory schedule example","title":"The factory schedule example","text":"","category":"section"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"This tutorial was generated using Literate.jl. 
Download the source as a .jl file.","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"This tutorial was originally contributed by @Crghilardi.","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"This tutorial is a Julia translation of Part 5 from Introduction to Linear Programming with Python.","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"The purpose of this tutorial is to demonstrate how to use DataFrames and delimited files, and how to structure your code so that it is robust to infeasibilities and can be run with different datasets.","category":"page"},{"location":"tutorials/linear/factory_schedule/#Required-packages","page":"The factory schedule example","title":"Required packages","text":"","category":"section"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"This tutorial requires the following packages:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"using JuMP\nimport CSV\nimport DataFrames\nimport HiGHS\nimport StatsPlots","category":"page"},{"location":"tutorials/linear/factory_schedule/#Formulation","page":"The factory schedule example","title":"Formulation","text":"","category":"section"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"The Factory Scheduling Problem assumes we are optimizing the production of a good from factories f in F over the course of 12 months m in M.","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"If a factory f runs during a month m, a fixed cost of a_f is incurred, the factory must produce x_mf units that lie within some minimum and maximum production levels l_f and u_f, respectively, and each unit of production incurs a variable cost c_f. Otherwise, the factory can be shut for the month with zero production and no fixed cost is incurred. We denote the run/not-run decision by z_mf in 0 1, where z_mf is 1 if factory f runs in month m. 
The factory must produce enough units to satisfy demand d_m.","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"With a little effort, we can formulate our problem as the following linear program:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"beginaligned\nmin sumlimits_f in F m in M a_f z_mf + c_f x_mf \ntextst x_mf le u_f z_mf forall f in F m in M \n x_mf ge l_f z_mf forall f in F m in M \n sumlimits_fin F x_mf = d_m forall f in F m in M \n z_mf in 0 1 forall f in F m in M\nendaligned","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"However, this formulation has a problem: if demand is too high, we may be unable to satisfy the demand constraint, and the problem will be infeasible.","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"tip: Tip\nWhen modeling, consider ways to formulate your model such that it always has a feasible solution. This greatly simplifies debugging data errors that would otherwise result in an infeasible solution. In practice, most practical decisions have a feasible solution. In our case, we could satisfy demand (at a high cost) by buying replacement items for the buyer, or running the factories in overtime to make up the difference.","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"We can improve our model by adding a new variable, delta_m, which represents the quantity of unmet demand in each month m. We penalize delta_m by an arbitrarily large value of $10,000/unit in the objective.","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"beginaligned\nmin sumlimits_f in F m in M a_f z_mf + c_f x_mf + sumlimits_m in M10000 delta_m \ntextst x_mf le u_f z_mf forall f in F m in M \n x_mf ge l_f z_mf forall f in F m in M \n sumlimits_fin F x_mf - delta_m = d_m forall f in F m in M \n z_mf in 0 1 forall f in F m in M \n delta_m ge 0 forall m in M\nendaligned","category":"page"},{"location":"tutorials/linear/factory_schedule/#Data","page":"The factory schedule example","title":"Data","text":"","category":"section"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"The JuMP GitHub repository contains two text files with the data we need for this tutorial.","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"The first file contains a dataset of our factories, A and B, with their production and cost levels for each month. 
For the documentation, the file is located at:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"factories_filename = joinpath(@__DIR__, \"factory_schedule_factories.txt\");\nnothing #hide","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"To run locally, download factory_schedule_factories.txt and update factories_filename appropriately.","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"The file has the following contents:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"print(read(factories_filename, String))","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"We use the CSV and DataFrames packages to read it into Julia:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"factory_df = CSV.read(\n factories_filename,\n DataFrames.DataFrame;\n delim = ' ',\n ignorerepeated = true,\n)","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"The second file contains the demand data by month:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"demand_filename = joinpath(@__DIR__, \"factory_schedule_demand.txt\");\nnothing #hide","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"To run locally, download factory_schedule_demand.txt and update demand_filename appropriately.","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"demand_df = CSV.read(\n demand_filename,\n DataFrames.DataFrame;\n delim = ' ',\n ignorerepeated = true,\n)","category":"page"},{"location":"tutorials/linear/factory_schedule/#Data-validation","page":"The factory schedule example","title":"Data validation","text":"","category":"section"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"Before moving on, it's always good practice to validate the data you read from external sources. The more effort you spend here, the fewer issues you will have later. The following function contains a few simple checks, but we could add more. 
For example, you might want to check that none of the values are too large (or too small), which might indicate a typo or a unit conversion issue (perhaps the variable costs are in $/1000 units instead of $/unit).","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"function valiate_data(\n demand_df::DataFrames.DataFrame,\n factory_df::DataFrames.DataFrame,\n)\n # Minimum production must not exceed maximum production.\n @assert all(factory_df.min_production .<= factory_df.max_production)\n # Demand, minimum production, fixed costs, and variable costs must all be\n # non-negative.\n @assert all(demand_df.demand .>= 0)\n @assert all(factory_df.min_production .>= 0)\n @assert all(factory_df.fixed_cost .>= 0)\n @assert all(factory_df.variable_cost .>= 0)\n return\nend\n\nvaliate_data(demand_df, factory_df)","category":"page"},{"location":"tutorials/linear/factory_schedule/#JuMP-formulation","page":"The factory schedule example","title":"JuMP formulation","text":"","category":"section"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"Next, we need to code our JuMP formulation. As shown in Design patterns for larger models, it's always good practice to code your model in a function that accepts well-defined input and returns well-defined output.","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"function solve_factory_scheduling(\n demand_df::DataFrames.DataFrame,\n factory_df::DataFrames.DataFrame,\n)\n # Even though we validated the data above, it's good practice to do it here\n # too.\n valiate_data(demand_df, factory_df)\n months, factories = unique(factory_df.month), unique(factory_df.factory)\n model = Model(HiGHS.Optimizer)\n set_silent(model)\n @variable(model, status[months, factories], Bin)\n @variable(model, production[months, factories], Int)\n @variable(model, unmet_demand[months] >= 0)\n # We use `eachrow` to loop through the rows of the dataframe and add the\n # relevant constraints.\n for r in eachrow(factory_df)\n m, f = r.month, r.factory\n @constraint(model, production[m, f] <= r.max_production * status[m, f])\n @constraint(model, production[m, f] >= r.min_production * status[m, f])\n end\n @constraint(\n model,\n [r in eachrow(demand_df)],\n sum(production[r.month, :]) + unmet_demand[r.month] == r.demand,\n )\n @objective(\n model,\n Min,\n 10_000 * sum(unmet_demand) + sum(\n r.fixed_cost * status[r.month, r.factory] +\n r.variable_cost * production[r.month, r.factory] for\n r in eachrow(factory_df)\n )\n )\n optimize!(model)\n @assert is_solved_and_feasible(model)\n schedules = Dict{Symbol,Vector{Float64}}(\n Symbol(f) => value.(production[:, f]) for f in factories\n )\n schedules[:unmet_demand] = value.(unmet_demand)\n return (\n termination_status = termination_status(model),\n cost = objective_value(model),\n # This `select` statement re-orders the columns in the DataFrame.\n schedules = DataFrames.select(\n DataFrames.DataFrame(schedules),\n [:unmet_demand, :A, :B],\n ),\n )\nend","category":"page"},{"location":"tutorials/linear/factory_schedule/#Solution","page":"The factory schedule example","title":"Solution","text":"","category":"section"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"Now we can 
call our solve_factory_scheduling function using the data we read in above.","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"solution = solve_factory_scheduling(demand_df, factory_df);\nnothing #hide","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"Let's see what solution contains:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"solution.termination_status","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"solution.cost","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"solution.schedules","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"These schedules will be easier to visualize as a graph:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"StatsPlots.groupedbar(\n Matrix(solution.schedules);\n bar_position = :stack,\n labels = [\"unmet demand\" \"A\" \"B\"],\n xlabel = \"Month\",\n ylabel = \"Production\",\n legend = :topleft,\n color = [\"#20326c\" \"#4063d8\" \"#a0b1ec\"],\n)","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"Note that we don't have any unmet demand.","category":"page"},{"location":"tutorials/linear/factory_schedule/#What-happens-if-demand-increases?","page":"The factory schedule example","title":"What happens if demand increases?","text":"","category":"section"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"Let's run an experiment by increasing the demand by 50% in all time periods:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"demand_df.demand .*= 1.5","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"Now we resolve the problem:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"high_demand_solution = solve_factory_scheduling(demand_df, factory_df);\nnothing #hide","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"and visualize the solution:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"StatsPlots.groupedbar(\n Matrix(high_demand_solution.schedules);\n bar_position = :stack,\n labels = [\"unmet demand\" \"A\" \"B\"],\n xlabel = \"Month\",\n ylabel = \"Production\",\n legend = :topleft,\n color = [\"#20326c\" \"#4063d8\" \"#a0b1ec\"],\n)","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule 
example","title":"The factory schedule example","text":"Uh oh, we can't satisfy all of the demand.","category":"page"},{"location":"tutorials/linear/factory_schedule/#How-sensitive-is-the-solution-to-changes-in-variable-cost?","page":"The factory schedule example","title":"How sensitive is the solution to changes in variable cost?","text":"","category":"section"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"Let's run another experiment, this time seeing how the optimal objective value changes as we vary the variable costs of each factory.","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"First though, let's reset the demand to it's original level:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"demand_df.demand ./= 1.5;\nnothing #hide","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"For our experiment, we're going to scale the variable costs of both factories by a set of values from 0.0 to 1.5:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"scale_factors = 0:0.1:1.5","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"At a high level, we're going to loop over the scale factors for A, then the scale factors for B, rescale the input data, call our solve_factory_scheduling example, and then store the optimal objective value in the following cost matrix:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"cost = zeros(length(scale_factors), length(scale_factors));\nnothing #hide","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"Because we're modifying factory_df in-place, we need to store the original variable costs in a new column:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"factory_df[!, :old_variable_cost] = copy(factory_df.variable_cost);\nnothing #hide","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"Then, we need a function to scale the :variable_cost column for a particular factory by a value scale:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"function scale_variable_cost(df, factory, scale)\n rows = df.factory .== factory\n df[rows, :variable_cost] .=\n round.(Int, df[rows, :old_variable_cost] .* scale)\n return\nend","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"Our experiment is just a nested for-loop, modifying A and B and storing the cost:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The 
factory schedule example","text":"for (j, a) in enumerate(scale_factors)\n scale_variable_cost(factory_df, \"A\", a)\n for (i, b) in enumerate(scale_factors)\n scale_variable_cost(factory_df, \"B\", b)\n cost[i, j] = solve_factory_scheduling(demand_df, factory_df).cost\n end\nend","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"Let's visualize the cost matrix:","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"StatsPlots.contour(\n scale_factors,\n scale_factors,\n cost;\n xlabel = \"Scale of factory A\",\n ylabel = \"Scale of factory B\",\n)","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"What can you infer from the solution?","category":"page"},{"location":"tutorials/linear/factory_schedule/","page":"The factory schedule example","title":"The factory schedule example","text":"info: Info\nThe Power Systems tutorial explains a number of other ways you can structure a problem to perform a parametric analysis of the solution. In particular, you can use in-place modification to reduce the time it takes to build and solve the resulting models.","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"EditURL = \"changelog.md\"","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"CurrentModule = JuMP","category":"page"},{"location":"release_notes/#Release-notes","page":"Release notes","title":"Release notes","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.","category":"page"},{"location":"release_notes/#[Version-1.23.4](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.23.4)-(November-8,-2024)","page":"Release notes","title":"Version 1.23.4 (November 8, 2024)","text":"","category":"section"},{"location":"release_notes/#Fixed","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed UnsupportedNonlinearOperator error for the single argument LinearAlgebra.norm (#3864)\nFixed printing MOI.Interval with MIME\"text/latex\" (#3866)","category":"page"},{"location":"release_notes/#Other","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Various minor improvements to the documentation (#3855) (#3860)\nAdded MathOptAI.jl and MathOptSymbolicAD.jl to the list of extensions in the documentation (#3858)\nClarified add_to_expression! 
can add two expressions (#3859)\nAdded SHOT to the installation table (#3853)\nImprovements to test coverage (#3867) (#3868) (#3869) (#3870) (#3871) (#3872) (#3873) (#3874) (#3875)\nJuMP now uses MOI.add_constrained_variable when adding a scalar variable with bounds for improving model creation performance with some solvers (#3863) (#3865)","category":"page"},{"location":"release_notes/#[Version-1.23.3](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.23.3)-(October-21,-2024)","page":"Release notes","title":"Version 1.23.3 (October 21, 2024)","text":"","category":"section"},{"location":"release_notes/#Fixed-2","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a printing bug with scientific numbers in MIME\"text/latex\" (#3838)\nFixed support for AbstractString in set_attribute (#3840)\nFixed a bug reporting vector-valued duals in solution_summary (#3846)\nFixed solution_summary when there are duplicate variable and constraint names (#3848)","category":"page"},{"location":"release_notes/#Other-2","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#3828) (#3831) (#3841) (#3843) (#3845)\nAdded the tutorial Tolerances and numerical issues (#3829) (#3830) (#3835)\nImproved the Benders decomposition tutorial (#38232) (#3833) (#3834)\nAdded DifferentiationInterface.jl to Automatic differentiation of user-defined operators (#3836) (#3842)\nAdded the tutorial Writing a solver interface (#3844)\nAdded the section Debugging performance problems (#3850)\nFormatting improvements (#3849)","category":"page"},{"location":"release_notes/#[Version-1.23.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.23.2)-(September-13,-2024)","page":"Release notes","title":"Version 1.23.2 (September 13, 2024)","text":"","category":"section"},{"location":"release_notes/#Fixed-3","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed an illegal simplification in MA.operate!! 
for NonlinearExpr (#3826)","category":"page"},{"location":"release_notes/#Other-3","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added Rolling horizon problems tutorial (#3815)\nAdded more tests for shapes and dual shapes (#3816)\nAdded more packages to extension-tests.yml (#3817) (#3818)\nRemoved an unnecessary test(#3819)\nDocumentation improvements (#3820) (#3822) (#3823)\nAdded PiecewiseLinearOpt.jl to the docs (#3824)","category":"page"},{"location":"release_notes/#[Version-1.23.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.23.1)-(August-30,-2024)","page":"Release notes","title":"Version 1.23.1 (August 30, 2024)","text":"","category":"section"},{"location":"release_notes/#Fixed-4","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a bug with indicator constraints and the in set syntax (#3813)","category":"page"},{"location":"release_notes/#Other-4","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Updated packages in documentation (#3807)\nUpdated the transitioning from MATLAB tutorial (#3809)\nAdd tutorial Performance problems with sum-if formulations (#3810)","category":"page"},{"location":"release_notes/#[Version-1.23.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.23.0)-(August-13,-2024)","page":"Release notes","title":"Version 1.23.0 (August 13, 2024)","text":"","category":"section"},{"location":"release_notes/#Added","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added set inequality syntax for matrices (#3766)\nImproved matrix inequality support (#3778) (#3805)","category":"page"},{"location":"release_notes/#Fixed-5","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a method for calling value on a ::Number (#3776)\nFixed querying dual of Symmetric and Hermitian equality constraints (#3797)\nFixed read_from_file for coefficient types other than Float64 (#3801)","category":"page"},{"location":"release_notes/#Other-5","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Documentation improvements\nFixed missing character in installation instructions (#3777)\nAdded a section of querying the Jacobian (#3779)\nClarify that SCIP does not support lazy constraints (#3784)\nFixed typo in knapsack.jl (#3792)\nAdded a warning to docs about tolerances in Bin and Int variables (#3794)\nClarify where to type installation commands (#3795)\nImprove error message for common incorrect syntax in constraint macro (#3781)\nChanged show(::IO, ::GenericModel) to a more informative tree structure (#3803)","category":"page"},{"location":"release_notes/#[Version-1.22.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.22.2)-(June-17,-2024)","page":"Release notes","title":"Version 1.22.2 (June 17, 2024)","text":"","category":"section"},{"location":"release_notes/#Fixed-6","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed printing to omit terms 
when printing a large array of expressions (#3759)\nFixed bug in printing when show is called on an invalid variable or constraint (#3763)","category":"page"},{"location":"release_notes/#Other-6","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improved error message for unsupported kwargs in variable macro (#3751)\nImproved error message for unsupported container syntax like x[A][B] (#3756)\nDocstring improvements (#3758), (#3760), (#3761), (#3767)\nAdded warning to documentation about Y <= X, Set() syntax (#3769)\nWork-around change on nightly (#3753), (#3754)\nImproved printing of symmetric matrices when used in constraints (#3768)\nFixed a test for upcoming printing change in MOI (#3772)\nUpdated should_i_use.md (#3773)","category":"page"},{"location":"release_notes/#[Version-1.22.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.22.1)-(May-17,-2024)","page":"Release notes","title":"Version 1.22.1 (May 17, 2024)","text":"","category":"section"},{"location":"release_notes/#Fixed-7","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed bug including non-.jl files in src/macros.jl (#3747)","category":"page"},{"location":"release_notes/#Other-7","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added DSDP to the list of supported solvers (#3745)\nUpdated YALMIP migration guide (#3748)","category":"page"},{"location":"release_notes/#[Version-1.22.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.22.0)-(May-12,-2024)","page":"Release notes","title":"Version 1.22.0 (May 12, 2024)","text":"","category":"section"},{"location":"release_notes/#Added-2","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added Base.complex(r, i) where r and i may be real-valued variables or affine or quadratic expressions (#3734)\nAdded @force_nonlinear for controlling when affine and quadratic expressions are instead parsed as nonlinear expressions. This can be useful for advanced users in a limited set of circumstances. (#3732)\nAdded support for returning the variable coefficients of a vector-valued constraint via normalized_coefficient. In addition, set_normalized_coefficients has been softly deprecated (no warning is thrown and old code will still work for all future 1.X releases of JuMP) in favor of set_normalized_coefficient. This change was made to unify how we get and set variable coefficients. (#3743)","category":"page"},{"location":"release_notes/#Fixed-8","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed missing promote_operation method that resulted in slow code (#3730)\nImproved performance of getindex for Containers.DenseAxisArray (#3731)\nFixed the error message when the legacy nonlinear API is mixed with the new nonlinear API. In particular, we now uniformly throw an error message when unexpected objects occur in nonlinear expressions. 
(#3741)","category":"page"},{"location":"release_notes/#Other-8","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Updated documentation (#3727), (#3728), (#3739)\nUpdated versions in GitHub actions (#3735)","category":"page"},{"location":"release_notes/#[Version-1.21.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.21.1)-(April-11,-2024)","page":"Release notes","title":"Version 1.21.1 (April 11, 2024)","text":"","category":"section"},{"location":"release_notes/#Fixed-9","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed behavior of complex-value related functions like real, imag, conj and abs2 when called on GenericNonlinearExpr. This fixes a method error when calling x' where x is an array of nonlinear expressions. As a related consequence, we now always error when creating nonlinear expressions with complex components. Previously, only some constructors were checked for complex expressionns. (#3724)","category":"page"},{"location":"release_notes/#Other-9","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#3719) (#3720) (#3721) (#3722)","category":"page"},{"location":"release_notes/#[Version-1.21.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.21.0)-(March-31,-2024)","page":"Release notes","title":"Version 1.21.0 (March 31, 2024)","text":"","category":"section"},{"location":"release_notes/#Added-3","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added support for matrix inequality constraints with the HermitianPSDCone (#3705)\nAdded batched modification methods for set_normalized_rhs, set_objective_coefficient and set_normalized_coefficient. Using these methods can be more efficient for some solvers (#3716)\nAdded the private constant _CONSTRAINT_LIMIT_FOR_PRINTING, which controls how many constraints are printed to the screen during print(model). The main purpose of this is to prevent large quantities of text being printed when print(model) is accidentally called on a large model. (#3686)","category":"page"},{"location":"release_notes/#Fixed-10","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Changed Containers.SparseAxisArray to use an OrderedDict as the backing data structure. Iterating over the elements in a SparseAxisArray now iterates in the order that the elements were created. Previously, the order was undefined behavior. 
(#3681)\nFixed complex variables for non-Float64 coefficient types (#3691)\nFixed LinearAlgebra.hermitian(::AbstractJuMPScalar) (#3693)\nFixed multiplying real scalar by Hermitian matrix (#3695)","category":"page"},{"location":"release_notes/#Other-10","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#3679) (#3683) (#3702) (#3703) (#3706) (#3696) (#3708) (#3709) (#3711)\nAdded new tutorials:\nBasis matrices (#3675)\nTransitioning from MATLAB (#3698)\nAutomatic differentiation of user-defined operators (#3713)\nUpdated versions and compat bounds (#3687) (#3707) (#3717)","category":"page"},{"location":"release_notes/#[Version-1.20.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.20.0)-(February-15,-2024)","page":"Release notes","title":"Version 1.20.0 (February 15, 2024)","text":"","category":"section"},{"location":"release_notes/#Added-4","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added is_solved_and_feasible (#3668)\nAdded support for MOI.ModelLike as the optimizer (#3667)","category":"page"},{"location":"release_notes/#Fixed-11","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed compat of DimensionalData (#3666)\nFixed convert(::Type{NonlinearExpr}, ::Number) (#3672)","category":"page"},{"location":"release_notes/#Other-11","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added Optim to list of solvers (#3624)\nImproved linking within documentation (#3669)","category":"page"},{"location":"release_notes/#[Version-1.19.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.19.0)-(February-1,-2024)","page":"Release notes","title":"Version 1.19.0 (February 1, 2024)","text":"","category":"section"},{"location":"release_notes/#Added-5","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added support for modifying quadratic coefficients (#3658)","category":"page"},{"location":"release_notes/#Fixed-12","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed short circuiting of && and || in macros (#3655)","category":"page"},{"location":"release_notes/#Other-12","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added SDPLR to list of solvers (#3644)\nAdded new roadmap items (#3645)\nFixed vale.sh version (#3650)\nImprove error messages in macros (#3653)\nRefactoring of set_normalized_coefficient (#3660) (#3661)\nUpdate docs/packages.toml (#3662)","category":"page"},{"location":"release_notes/#[Version-1.18.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.18.1)-(January-6,-2024)","page":"Release notes","title":"Version 1.18.1 (January 6, 2024)","text":"","category":"section"},{"location":"release_notes/#Fixed-13","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed escaping of the set keyword in @variable 
(#3647)","category":"page"},{"location":"release_notes/#[Version-1.18.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.18.0)-(January-2,-2024)","page":"Release notes","title":"Version 1.18.0 (January 2, 2024)","text":"","category":"section"},{"location":"release_notes/#Added-6","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"This release includes a large refactoring of the macro code that closes a roadmap item (#3629) Contributing pull requests include (#3600), (#3603), (#3606), (#3607), (#3610), (#3611), (#3612), (#3613), (#3614), (#3615), (#3617), (#3618), (#3619), (#3620), (#3621), (#3631), (#3632), (#3633)","category":"page"},{"location":"release_notes/#Fixed-14","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed error for unsupported objective sense (#3601)\nFixed text/latex printing of GenericNonlinearExpr (#3609)\nFixed compat bounds of stdlib packages (#3626)\nFixed a bug that can accidentally modify the user's expressions in a macro (#3639)\nFixed a bug converting AffExpr to GenericNonlinearExpr (#3642)","category":"page"},{"location":"release_notes/#Other-13","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added DisjunctiveProgrammingto extension-tests (#3597)\nAdded DisjunctiveProgrammingto docs (#3598)\nAdded DocumenterCitations to the docs (#3596), (#3630)\nMigrate from SnoopPrecompile to PrecompileTools (#3608)\nMinor documentation updates (#3623), (#3628), (#3635), (#3640), (#3643)","category":"page"},{"location":"release_notes/#[Version-1.17.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.17.0)-(December-4,-2023)","page":"Release notes","title":"Version 1.17.0 (December 4, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-7","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added start_value, lower_bound, and upper_bound support for GenericAffExpr that are equivalent to a single GenericVariableRef (#3551)\nAdded SkipModelConvertScalarSetWrapper which is useful for extensions looking to avoid model_convert (#3552) (#3592)\nAdded lp_matrix_data (#3573) (#3591)","category":"page"},{"location":"release_notes/#Fixed-15","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed variable_ref_type for unsupported types (#3556)\nFixed convert type of constraint starting values (#3571)\nFixed various methods to support AbstractJuMPScalar with Distances.jl (#3583)\nFixed eachindex for multiple arguments of Containers.DenseAxisArray and Containers.SparseAxisArray (#3587)\nExpressions with more than 60 terms now print in truncated form. 
This prevents large expressions from being accidentally printed to terminal or IJulia output (#3575)\nFixed a type instability in set_objective_coefficient (#3590)\nVarious fixes to the documentation (#3593) (#3595)","category":"page"},{"location":"release_notes/#Other-14","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improved error messages for:\nAddition and subtraction between a matrix and a scalar (#3557) (#3558)\nVariables with non-constant bounds (#3583)\nInvalid indicator constraints (#3584)\nAdded new solvers to the documentation:\nEAGO.jl (#3560) (#3561)\nManopt.jl (#3568)\nPercival.jl (#3567)\nAdded new tutorials:\nApproximating nonlinear functions (#3563)\nExample: classification problems (#3569)\nImproved documentation for:\nSemicontinuous and Semiinteger variables (#3562)\nSOS1 and SOS2 (#3565)\nstart_value of HermitianPSDCone (#3564)\nFunction tracing (#3570)\nNonlinear operators with vector arguments (#3577)\nIndicator constraints (#3582)\nUpdated package compat bounds (#3578)","category":"page"},{"location":"release_notes/#[Version-1.16.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.16.0)-(October-24,-2023)","page":"Release notes","title":"Version 1.16.0 (October 24, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-8","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added := operator for Boolean satisfiability problems (#3530)","category":"page"},{"location":"release_notes/#Fixed-16","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed text/latex printing of MOI.Interval sets (#3537)\nFixed tests with duplicate function names (#3539)","category":"page"},{"location":"release_notes/#Other-15","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Updated documentation list of supported solvers (#3527) (#3529) (#3538) (#3542) (#3545) (#3546)\nUpdated to Documenter@1.1 (#3528)\nFixed various tutorials (#3534) (#3532)\nFixed Project.toml compat bounds for standard libraries (#3544)","category":"page"},{"location":"release_notes/#[Version-1.15.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.15.1)-(September-24,-2023)","page":"Release notes","title":"Version 1.15.1 (September 24, 2023)","text":"","category":"section"},{"location":"release_notes/#Fixed-17","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed support for single argument min and max operators (#3522)\nFixed error message for add_to_expression! 
when called with a GenericNonlinearExpr (#3506)\nFixed constraint tags with broadcasted constraints (#3515)\nFixed MethodError in MA.scaling (#3518)\nFixed support for arrays of Parameter variables (#3524)","category":"page"},{"location":"release_notes/#Other-16","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Updated to Documenter@1 (#3501)\nFixed links to data in tutorials (#3512)\nFixed typo in TSP tutorial (#3516)\nImproved error message for VariableNotOwned errors (#3520)\nFixed various JET errors (#3519)","category":"page"},{"location":"release_notes/#[Version-1.15.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.15.0)-(September-15,-2023)","page":"Release notes","title":"Version 1.15.0 (September 15, 2023)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"This is a large minor release because it adds an entirely new data structure and API path for working with nonlinear programs. The previous nonlinear interface remains unchanged and is documented at Nonlinear Modeling (Legacy). The new interface is treated as a non-breaking feature addition and is documented at Nonlinear Modeling.","category":"page"},{"location":"release_notes/#Breaking","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Although the new nonlinear interface is a feature addition, there are two changes which might be breaking for a very small number of users.","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The syntax inside JuMP macros is parsed using a different code path, even for linear and quadratic expressions. We made this change to unify how we parse linear, quadratic, and nonlinear expressions. In all cases, the new code returns equivalent expressions, but because of the different order of operations, there are three changes to be aware of when updating:\nThe printed form of the expression may change, for example from x * y to y * x. This can cause tests which test the String representation of a model to fail.\nSome coefficients may change slightly due to floating point round-off error.\nParticularly when working with a JuMP extension, you may encounter a MethodError due to a missing or ambiguous method. These errors are due to previously existing bugs that were not triggered by the previous parsing code. If you encounter such an error, please open a GitHub issue.\nThe methods for Base.:^(x::VariableRef, n::Integer) and Base.:^(x::AffExpr, n::Integer) have changed. Previously, these methods supported only n = 0, 1, 2 and they always returned a QuadExpr, even for the case when n = 0 or n = 1. Now:\nx^0 returns one(T), where T is the value_type of the model (defaults to Float64)\nx^1 returns x\nx^2 returns a QuadExpr\nx^n where !(0 <= n <= 2) returns a NonlinearExpr.\nWe made this change to support nonlinear expressions and to align the mathematical definition of the operation with their return type. (Previously, users were surprised that x^1 returned a QuadExpr.) As a consequence of this change, the methods are now not type-stable. This means that the compiler cannot prove that x^2 returns a QuadExpr. 
If benchmarking shows that this is a performance problem, you can use the type-stable x * x instead of x^2.","category":"page"},{"location":"release_notes/#Added-9","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added triangle_vec which simplifies adding MOI.LogDetConeTriangle and MOI.RootDetConeTriangle constraints (#3456)\nAdded the new nonlinear interface. This is a very large change. See the documentation at Nonlinear Modeling and the (long) discussion in JuMP.jl#3106. Related PRs are (#3468) (#3472) (#3475) (#3483) (#3487) (#3488) (#3489) (#3504) (#3509)","category":"page"},{"location":"release_notes/#Fixed-18","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed uses of @nospecialize which cause precompilation failures in Julia v1.6.0 and v1.6.1. (#3464)\nFixed adding a container of Parameter (#3473)\nFixed return type of x^0 and x^1 to no longer return QuadExpr (see note in Breaking section above) (#3474)\nFixed error messages in LowerBoundRef, UpperBoundRef, FixRef, IntegerRef, BinaryRef, ParameterRef and related functions (#3494)\nFixed type inference of empty containers in JuMP macros (#3500)","category":"page"},{"location":"release_notes/#Other-17","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added GAMS to solver documentation (#3357)\nUpdated various tutorials (#3459) (#3460) (#3462) (#3463) (#3465) (#3490) (#3492) (#3503)\nAdded The network multi-commodity flow problem tutorial (#3491)\nAdded Two-stage stochastic programs tutorial (#3466)\nAdded better error messages for unsupported operations in LinearAlgebra (#3476)\nUpdated to the latest version of Documenter (#3484) (#3495) (#3497)\nUpdated GitHub action versions (#3507)","category":"page"},{"location":"release_notes/#[Version-1.14.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.14.1)-(September-2,-2023)","page":"Release notes","title":"Version 1.14.1 (September 2, 2023)","text":"","category":"section"},{"location":"release_notes/#Fixed-19","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix links in Documentation (#3478)","category":"page"},{"location":"release_notes/#[Version-1.14.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.14.0)-(August-27,-2023)","page":"Release notes","title":"Version 1.14.0 (August 27, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-10","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added DimensionalData.jl extension (#3413)\nAdded syntactic sugar for the MOI.Parameter set (#3443)\nParameter\nParameterRef\nis_parameter\nparameter_value\nset_parameter_value","category":"page"},{"location":"release_notes/#Fixed-20","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed model_convert for BridgeableConstraint (#3437)\nFixed printing models with integer coefficients larger than typemax(Int) (#3447)\nFixed support for constant left-hand side functions in a complementarity constraint 
(#3452)","category":"page"},{"location":"release_notes/#Other-18","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Updated packages used in documentation (#3444) (#3455)\nFixed docstring tests (#3445)\nFixed printing change for MathOptInterface (#3446)\nFixed typos in documentation (#3448) (#3457)\nAdded SCIP to callback documentation (#3449)","category":"page"},{"location":"release_notes/#[Version-1.13.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.13.0)-(July-27,-2023)","page":"Release notes","title":"Version 1.13.0 (July 27, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-11","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added support for generic number types (#3377) (#3385)\nAdded fallback for MOI.AbstractSymmetricMatrixSetTriangle and MOI.AbstractSymmetricMatrixSetSquare (#3424)","category":"page"},{"location":"release_notes/#Fixed-21","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed set_start_values with MOI.Bridges.Objective.SlackBridge (#3422)\nFixed flakey doctest in variables.md (#3425)\nFixed names on CITATION.bib (#3423)","category":"page"},{"location":"release_notes/#Other-19","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added Loraine.jl to the installation table (#3426)\nRemoved Penopt.jl from packages.toml (#3428)\nImproved problem statement in cannery example of tutorial (#3430)\nMinor cleanups in Containers.DenseAxisArray implementation (#3429)\nChanged nested_problems.jl: outer/inner to upper/lower (#3433)\nRemoved second SDP relaxation in OPF tutorial (#3432)","category":"page"},{"location":"release_notes/#[Version-1.12.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.12.0)-(June-19,-2023)","page":"Release notes","title":"Version 1.12.0 (June 19, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-12","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added coefficient_type keyword argument to add_bridge and remove_bridge (#3394)","category":"page"},{"location":"release_notes/#Fixed-22","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed error message for matrix in HermitianPSDCone (#3369)\nFixed EditURL for custom documentation pages (#3373)\nFixed return type annotations for MOI.ConstraintPrimal and MOI.ConstraintDual (#3381)\nFixed printing change in Julia nightly (#3391)\nFixed printing of Complex coefficients (#3397)\nFixed printing of constraints in text/latex mode (#3405)\nFixed performance issue in Containers.rowtable (#3410)\nFixed bug when variables added to set of wrong dimension (#3411)","category":"page"},{"location":"release_notes/#Other-20","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added more solver READMEs to the documentation (#3358) (#3360) (#3364) (#3365) (#3366) (#3368) (#3372) (#3374) (#3376) (#3379) (#3387) (#3389)\nAdded StatusSwitchingQP.jl 
to the installation table (#3354)\nUpdated checklist for adding a new solver (#3370)\nUpdated extension-tests.yml action (#3371) (#3375)\nColor logs in GitHub actions (#3392)\nAdded new tutorials\nOptimal power flow (#3395) (#3412)\nLovász numbers (#3399)\nDualization (#3402)\nUpdated JuMP paper citation (#3400)\nChanged GitHub action to upload LaTeX logs when building documentation (#3403)\nFixed printing of SCS log in documentation (#3406)\nUpdated solver versions (#3407)\nUpdated documentation to use Julia v1.9 (#3398)\nReplaced _value_type with MOI.Utilities.value_type (#3414)\nFixed a typo in docstring (#3415)\nRefactored API documentation (#3386)\nUpdated SCIP license (#3420)","category":"page"},{"location":"release_notes/#[Version-1.11.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.11.1)-(May-19,-2023)","page":"Release notes","title":"Version 1.11.1 (May 19, 2023)","text":"","category":"section"},{"location":"release_notes/#Fixed-23","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a poor error message when sum(::DenseAxisArray; dims) was called (#3338)\nFixed support for dependent sets in the @variable macro (#3344)\nFixed a performance bug in constraints with sparse symmetric matrices (#3349)","category":"page"},{"location":"release_notes/#Other-21","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improved the printing of complex numbers (#3332)\nWhen printing, sets which contain constants ending in .0 now print as integers. This follows the behavior of constants in functions (#3341)\nAdded InfiniteOpt to the extensions documentation (#3343)\nAdded more documentation for the exponential cone (#3345) (#3347)\nAdded checklists for developers (#3346) (#3355)\nFixed test support upcoming Julia nightly (#3351)\nFixed extension-tests.yml action (#3353)\nAdd more solvers to the documentation (#3359) (#3361) (#3362)","category":"page"},{"location":"release_notes/#[Version-1.11.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.11.0)-(May-3,-2023)","page":"Release notes","title":"Version 1.11.0 (May 3, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-13","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added new methods to print_active_bridges for printing a particular objective, constraint, or variable (#3316)","category":"page"},{"location":"release_notes/#Fixed-24","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed tests for MOI v1.14.0 release (#3312)\nFixed indexing containers when an axis is Vector{Any} that contains a Vector{Any} element (#3280)\nFixed getindex(::AbstractJuMPScalar) which is called for an expression like x[] (#3314)\nFixed bug in set_string_names_on_creation with a vector of variables (#3322)\nFixed bug in memoize function in nonlinear documentation (#3337)","category":"page"},{"location":"release_notes/#Other-22","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed typos in the documentation (#3317) (#3318) (#3328)\nAdded a test for the order of setting start values (#3315)\nAdded READMEs of 
solvers and extensions to the docs (#3309) (#3320) (#3327) (#3329) (#3333)\nStyle improvements to src/variables.jl (#3324)\nClarify that column generation does not find global optimum (#3325)\nAdd a GitHub actions workflow for testing extensions prior to release (#3331)\nDocument the release process for JuMP (#3334)\nFix links to discourse and chatroom (#3335)","category":"page"},{"location":"release_notes/#[Version-1.10.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.10.0)-(April-3,-2023)","page":"Release notes","title":"Version 1.10.0 (April 3, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-14","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added Nonnegatives, Nonpositives and Zeros, and support vector-valued inequality syntax in the JuMP macros (#3273)\nAdded special support for LinearAlgebra.Symmetric and LinearAlgebra.Hermitian matrices in Zeros constraints (#3281) (#3296)\nAdded HermitianMatrixSpace and the Hermitian tag for generating a matrix of variables that is Hermitian (#3292) (#3293)\nAdded Semicontinuous and Semiinteger (#3302)\nAdded support for keyword indexing of containers (#3237)","category":"page"},{"location":"release_notes/#Fixed-25","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed [compat] bound for MathOptInterface in Project.toml (#3272)","category":"page"},{"location":"release_notes/#Other-23","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Split out the Nested optimization problems tutorial (#3274)\nUpdated doctests to ensure none have hidden state (#3275) (#3276)\nClarified how lazy constraints may revisit points (#3278)\nAdded P-Norm example (#3282)\nClarified docs that macros create new bindings (#3284)\nFixed threading example (#3283)\nAdded plot to The minimum distortion problem (#3288)\nAdded Google style rules for Vale and fixed warnings (#3285)\nAdded citation for the JuMP 1.0 paper (#3294)\nUpdated package versions in the documentation (#3298)\nAdded comment for the order in which start values must be set (#3303)\nImproved error message for unrecognized constraint operators (#3311)","category":"page"},{"location":"release_notes/#[Version-1.9.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.9.0)-(March-7,-2023)","page":"Release notes","title":"Version 1.9.0 (March 7, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-15","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added get_attribute and set_attribute. These replace get_optimizer_attribute and set_optimizer_attribute, although the _optimizer_ functions remain for backward compatibility. (#3219)\nAdded set_start_values for setting all supported start values in a model (#3238)\nAdd remove_bridge and print_active_bridges (#3259)","category":"page"},{"location":"release_notes/#Fixed-26","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The matrix returned by a variable in HermitianPSDCone is now a LinearAlgebra.Hermitian matrix. This is potentially breaking if you have written code to assume the return is a Matrix. 
(#3245) (#3246)\nFixed missing support for Base.isreal of expressions (#3252)","category":"page"},{"location":"release_notes/#Other-24","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a thread safety issue in the Parallelism tutorial (#3240) (#3243)\nImproved the error message when unsupported operators are used in @NL macros (#3236)\nClarified the documentation to say that matrices in HermitianPSDCone must be LinearAlgebra.Hermitian (#3241)\nMinor style fixes to internal macro code (#3247)\nAdd Example: quantum state discrimination tutorial (#3250)\nImprove error message when begin...end not passed to plural macros (#3255)\nDocument how to register function with varying number of input arguments (#3258)\nTidy tests by removing unneeded JuMP. prefixes (#3260)\nClarified the introduction to the Complex number support tutorial (#3262)\nFixed typos in the Documentation (#3263) (#3266) (#3268) (#3269)","category":"page"},{"location":"release_notes/#[Version-1.8.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.8.2)-(February-27,-2023)","page":"Release notes","title":"Version 1.8.2 (February 27, 2023)","text":"","category":"section"},{"location":"release_notes/#Fixed-27","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed dot product between complex JuMP expression and number (#3244)","category":"page"},{"location":"release_notes/#Other-25","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Polish simple SDP examples (#3232)","category":"page"},{"location":"release_notes/#[Version-1.8.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.8.1)-(February-23,-2023)","page":"Release notes","title":"Version 1.8.1 (February 23, 2023)","text":"","category":"section"},{"location":"release_notes/#Fixed-28","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed support for init in nonlinear generator expressions (#3226)","category":"page"},{"location":"release_notes/#Other-26","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Use and document import MathOptInterface as MOI (#3222)\nRemoved references in documentation to multiobjective optimization being unsupported (#3223)\nAdded tutorial on multi-objective portfolio optimization (#3227)\nRefactored some of the conic tutorials (#3229)\nFixed typos in the documentation (#3230)\nAdded tutorial on parallelism (#3231)","category":"page"},{"location":"release_notes/#[Version-1.8.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.8.0)-(February-16,-2023)","page":"Release notes","title":"Version 1.8.0 (February 16, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-16","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added --> syntax support for indicator constraints. The old syntax of => remains supported (#3207)\nAdded <--> syntax for reified constraints. For now, few solvers support reified constraints (#3206)\nAdded fix_discrete_variables. 
This is most useful for computing the dual of a mixed-integer program (#3208)\nAdded support for vector-valued objectives. For details, see the Multi-objective knapsack tutorial (#3176)","category":"page"},{"location":"release_notes/#Fixed-29","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a bug in lp_sensitivity_report by switching to an explicit LU factorization of the basis matrix (#3182)\nFixed a bug that prevented [; kwarg] arguments in macros (#3220)","category":"page"},{"location":"release_notes/#Other-27","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Minor fixes to the documentation (#3200) (#3201) (#3203) (#3210)\nAdded tutorial Constraint programming (#3202)\nAdded more examples to Modeling with cones\nRemove _distance_to_set in favor of MOI.Utilities.distance_to_set (#3209)\nImprove The diet problem tutorial by adding the variable as a column in the dataframe (#3213)\nImprove The knapsack problem example tutorial (#3216) (#3217)\nAdded the Example: ellipsoid approximation tutorial (#3218)","category":"page"},{"location":"release_notes/#[Version-1.7.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.7.0)-(January-25,-2023)","page":"Release notes","title":"Version 1.7.0 (January 25, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-17","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added support for view of a Containers.DenseAxisArray (#3152) (#3180)\nAdded support for containers of variables in ComplexPlane (#3184)\nAdded support for minimum and maximum generators in nonlinear expressions (#3189)\nAdded SnoopPrecompile statements that reduce the time-to-first-solve in Julia 1.9 (#3193) (#3195) (#3196) (#3197)","category":"page"},{"location":"release_notes/#Other-28","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Large refactoring of the tests (#3166) (#3167) (#3168) (#3169) (#3170) (#3171)\nRemove unreachable code due to VERSION checks (#3172)\nDocument how to test JuMP extensions (#3174)\nFix method ambiguities in Containers (#3173)\nImprove error message that is thrown when = is used instead of == in the @constraint macro (#3178)\nImprove the error message when Bool is used instead of Bin in the @variable macro (#3180)\nUpdate versions of the documentation (#3185)\nTidy the import of packages and remove unnecessary prefixes (#3186) (#3187)\nRefactor src/JuMP.jl by moving methods into more relevant files (#3188)\nFix docstring of Model not appearing in the documentation (#3198)","category":"page"},{"location":"release_notes/#[Version-1.6.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.6.0)-(January-1,-2023)","page":"Release notes","title":"Version 1.6.0 (January 1, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-18","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added a result keyword argument to solution_summary to allow summarizing models with multiple solutions (#3138)\nAdded relax_with_penalty!, which is a useful tool when debugging infeasible models (#3140)\nAdded has_start_value 
(#3157)\nAdded support for HermitianPSDCone in constraints (#3154)","category":"page"},{"location":"release_notes/#Fixed-30","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed promotion of complex expressions (#3150) (#3164)","category":"page"},{"location":"release_notes/#Other-29","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added Benders tutorial with in-place resolves (#3145)\nAdded more Tips and tricks for linear programs (#3144) (#3163)\nClarified documentation that start can depend on the indices of a variable container (#3148)\nReplace instances of length and size by the recommended eachindex and axes (#3149)\nAdded a warning explaining why the model is dirty when accessing solution results from a modified model (#3156)\nClarify documentation that PSD ensures a symmetric matrix (#3159)\nMaintenance of the JuMP test suite (#3146) (#3158) (#3162)","category":"page"},{"location":"release_notes/#[Version-1.5.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.5.0)-(December-8,-2022)","page":"Release notes","title":"Version 1.5.0 (December 8, 2022)","text":"","category":"section"},{"location":"release_notes/#Added-19","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Add support for complex-valued variables:\nHermitianPSDCone (#3109)\nComplexPlane and ComplexVariable (#3134)\nAdd support for MOI.OptimizerWithAttributes in set_optimizer_attribute and get_optimizer_attribute (#3129)","category":"page"},{"location":"release_notes/#Fixed-31","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed error message for vectorized interval constraints (#3123)\nFixed passing AbstractString to set_optimizer_attribute (#3127)","category":"page"},{"location":"release_notes/#Other-30","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Update package versions used in docs (#3119) (#3133) (#3139)\nFixed output of diet tutorial (#3120)\nExplain how to use Dates.period in set_time_limit_sec (#3121)\nUpdate to JuliaFormatter v1.0.15 (#3130)\nFixed HTTP server example in web_app.jl (#3131)\nUpdate docs to build with Documenter#master (#3094)\nAdd tests for LinearAlgebra operations (#3132)\nTidy these release notes (#3135)\nAdded documentation for Complex number support (#3141)\nRemoved the \"workforce scheduling\" and \"steelT3\" tutorials (#3143)","category":"page"},{"location":"release_notes/#[Version-1.4.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.4.0)-(October-29,-2022)","page":"Release notes","title":"Version 1.4.0 (October 29, 2022)","text":"","category":"section"},{"location":"release_notes/#Added-20","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added Containers.rowtable which converts a container into a vector of NamedTuples to support the Tables.jl interface. 
This simplifies converting Containers.DenseAxisArray and Containers.SparseAxisArray objects into tabular forms such as a DataFrame (#3104)\nAdded a new method to Containers.container so that index names are passed to the container (#3088)","category":"page"},{"location":"release_notes/#Fixed-32","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a bug in copy_to(dest::Model, src::MOI.ModelLike) when src has nonlinear components (#3101)\nFixed the printing of (-1.0 + 0.0im) coefficients in complex expressions (#3112)\nFixed a parsing bug in nonlinear expressions with generator statements that contain multiple for statements (#3116)","category":"page"},{"location":"release_notes/#Other-31","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Converted the multi-commodity flow tutorial to use an SQLite database (#3098)\nFixed a number of typos in the documentation (#3103) (#3107) (#3018)\nImproved various style aspects of the PDF documentation (#3095) (#3098) (#3102)","category":"page"},{"location":"release_notes/#[Version-1.3.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.3.1)-(September-28,-2022)","page":"Release notes","title":"Version 1.3.1 (September 28, 2022)","text":"","category":"section"},{"location":"release_notes/#Fixed-33","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a performance issue in relax_integrality (#3087)\nFixed the type stability of operators with Complex arguments (#3072)\nFixed a bug which added additional +() terms to some nonlinear expressions (#3091)\nFixed potential method ambiguities with AffExpr and QuadExpr objects (#3092)","category":"page"},{"location":"release_notes/#Other-32","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added vale as a linter for the documentation (#3080)\nAdded a tutorial on debugging JuMP models (#3043)\nFixed a number of typos in the documentation (#3079) (#3083)\nMany other small tweaks to the documentation (#3068) (#3073) (#3074) (#3075) (#3076) (#3077) (#3078) (#3081) (#3082) (#3084) (#3085) (#3089)","category":"page"},{"location":"release_notes/#[Version-1.3.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.3.0)-(September-5,-2022)","page":"Release notes","title":"Version 1.3.0 (September 5, 2022)","text":"","category":"section"},{"location":"release_notes/#Added-21","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Support slicing in SparseAxisArray (#3031)","category":"page"},{"location":"release_notes/#Fixed-34","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a bug introduced in v1.2.0 that prevented DenseAxisArrays with Vector keys (#3064)","category":"page"},{"location":"release_notes/#Other-33","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Released the JuMP logos under the CC BY 4.0 license (#3063)\nMinor tweaks to the documentation (#3054) (#3056) (#3057) (#3060) 
(#3061) (#3065)\nImproved code coverage of a number of files (#3048) (#3049) (#3050) (#3051) (#3052) (#3053) (#3058) (#3059)","category":"page"},{"location":"release_notes/#[Version-1.2.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.2.1)-(August-22,-2022)","page":"Release notes","title":"Version 1.2.1 (August 22, 2022)","text":"","category":"section"},{"location":"release_notes/#Fixed-35","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a bug when parsing two-sided nonlinear constraints (#3045)","category":"page"},{"location":"release_notes/#[Version-1.2.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.2.0)-(August-16,-2022)","page":"Release notes","title":"Version 1.2.0 (August 16, 2022)","text":"","category":"section"},{"location":"release_notes/#Breaking-2","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"This is a large minor release because it significantly refactors the internal code for handling nonlinear programs to use the MathOptInterface.Nonlinear submodule that was introduced in MathOptInterface v1.3.0. As a consequence, the internal datastructure in model.nlp_data has been removed, as has the JuMP._Derivatives submodule. Despite the changes, the public API for nonlinear programming has not changed, and any code that uses only the public API and that worked with v1.1.1 will continue to work with v1.2.0.","category":"page"},{"location":"release_notes/#Added-22","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added all_constraints(model; include_variable_in_set_constraints) which simplifies returning a list of all constraint indices in the model.\nAdded the ability to delete nonlinear constraints via delete(::Model, ::NonlinearConstraintRef).\nAdded the ability to provide an explicit Hessian for a multivariate user-defined function.\nAdded support for querying the primal value of a nonlinear constraint via value(::NonlinearConstraintRef)","category":"page"},{"location":"release_notes/#Fixed-36","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a bug in Containers.DenseAxisArray so that it now supports indexing with keys that hash to the same value, even if they are different types, for example, Int32 and Int64.\nFixed a bug printing the model when the solver does not support MOI.Name.","category":"page"},{"location":"release_notes/#Other-34","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added a constraint programming formulation to the Sudoku tutorial.\nAdded newly supported solvers Pajarito, Clarabel, and COPT to the installation table.\nFixed a variety of other miscellaneous issues in the documentation.","category":"page"},{"location":"release_notes/#[Version-1.1.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.1.1)-(June-14,-2022)","page":"Release notes","title":"Version 1.1.1 (June 14, 2022)","text":"","category":"section"},{"location":"release_notes/#Other-35","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed 
problem displaying LaTeX in the documentation\nMinor updates to the style guide\nUpdated to MOI v1.4.0 in the documentation","category":"page"},{"location":"release_notes/#[Version-1.1.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.1.0)-(May-25,-2022)","page":"Release notes","title":"Version 1.1.0 (May 25, 2022)","text":"","category":"section"},{"location":"release_notes/#Added-23","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added num_constraints(::Model; count_variable_in_set_constraints) to simplify the process of counting the number of constraints in a model\nAdded VariableRef(::ConstraintRef) for querying the variable associated with a bound or integrality constraint.\nAdded set_normalized_coefficients for modifying the variable coefficients of a vector-valued constraint.\nAdded set_string_names_on_creation to disable creating String names for variables and constraints. This can improve performance.","category":"page"},{"location":"release_notes/#Fixed-37","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a bug passing nothing to the start keyword of @variable","category":"page"},{"location":"release_notes/#Other-36","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"New tutorials:\nSensitivity analysis of a linear program\nServing web apps\nMinimal ellipse SDP tutorial refactored and improved\nDocs updated to the latest version of each package\nLots of minor fixes and improvements to the documentation","category":"page"},{"location":"release_notes/#[Version-1.0.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v1.0.0)-(March-24,-2022)","page":"Release notes","title":"Version 1.0.0 (March 24, 2022)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Read more about this release, along with an acknowledgement of all the contributors in our JuMP 1.0.0 is released blog post.","category":"page"},{"location":"release_notes/#Breaking-3","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The previously deprecated functions (v0.23.0, v0.23.1) have been removed. 
Deprecation was to improve consistency of function names:\nnum_nl_constraints (see num_nonlinear_constraints)\nall_nl_constraints (see all_nonlinear_constraints)\nadd_NL_expression (see add_nonlinear_expression)\nset_NL_objective (see set_nonlinear_objective)\nadd_NL_constraint (see add_nonlinear_constraint)\nnl_expr_string (see nonlinear_expr_string)\nnl_constraint_string (see nonlinear_constraint_string)\nSymMatrixSpace (see SymmetricMatrixSpace)\nThe unintentionally exported variable JuMP.op_hint has been renamed to the unexported JuMP._OP_HINT","category":"page"},{"location":"release_notes/#Fixed-38","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a bug writing .nl files\nFixed a bug broadcasting SparseAxisArrays","category":"page"},{"location":"release_notes/#[Version-0.23.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.23.2)-(March-14,-2022)","page":"Release notes","title":"Version 0.23.2 (March 14, 2022)","text":"","category":"section"},{"location":"release_notes/#Added-24","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added relative_gap to solution_summary\nregister now throws an informative error if the function is not differentiable using ForwardDiff. In some cases, the check in register will encounter a false negative, and the informative error will be thrown at run-time. This usually happens when the function is non-differentiable in a subset of the domain.","category":"page"},{"location":"release_notes/#Fixed-39","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a scoping issue when extending the container keyword of containers","category":"page"},{"location":"release_notes/#Other-37","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Docs updated to the latest version of each package","category":"page"},{"location":"release_notes/#[Version-0.23.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.23.1)-(March-2,-2022)","page":"Release notes","title":"Version 0.23.1 (March 2, 2022)","text":"","category":"section"},{"location":"release_notes/#Deprecated","page":"Release notes","title":"Deprecated","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"nl_expr_string and nl_constraint_string have been renamed to nonlinear_expr_string and nonlinear_constraint_string. The old methods still exist with deprecation warnings. This change should impact very few users because to call them you must rely on private internals of the nonlinear API. Users are encouraged to use sprint(show, x) instead, where x is the nonlinear expression or constraint of interest.","category":"page"},{"location":"release_notes/#Added-25","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added support for Base.abs2(x) where x is a variable or affine expression. 
This is mainly useful for complex-valued constraints.","category":"page"},{"location":"release_notes/#Fixed-40","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed addition of complex and real affine expressions\nFixed arithmetic for Complex-valued quadratic expressions\nFixed variable bounds passed as Rational{Int}(Inf)\nFixed printing of the coefficient (0 + 1im)\nFixed a bug when solution_summary is called prior to optimize!","category":"page"},{"location":"release_notes/#[Version-0.23.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.23.0)-(February-25,-2022)","page":"Release notes","title":"Version 0.23.0 (February 25, 2022)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"JuMP v0.23.0 is a breaking release. It is also a release-candidate for JuMP v1.0.0. That is, if no issues are found with the v0.23.0 release, then it will be re-tagged as v1.0.0.","category":"page"},{"location":"release_notes/#Breaking-4","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Julia 1.6 is now the minimum supported version\nMathOptInterface has been updated to v1.0.0\nAll previously deprecated functionality has been removed\nPrintMode, REPLMode and IJuliaMode have been removed in favor of the MIME types MIME\"text/plain\" and MIME\"text/latex\". Replace instances of ::Type{REPLMode} with ::MIME\"text/plain\", REPLMode with MIME(\"text/plain\"), ::Type{IJuliaMode} with ::MIME\"text/latex\", and IJuliaMode with MIME(\"text/latex\").\nFunctions containing the nl_ acronym have been renamed to the more explicit nonlinear_. For example, num_nl_constraints is now num_nonlinear_constraints and set_NL_objective is now set_nonlinear_objective. Calls to the old functions throw an error explaining the new name.\nSymMatrixSpace has been renamed to SymmetricMatrixSpace","category":"page"},{"location":"release_notes/#Added-26","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added nonlinear_dual_start_value and set_nonlinear_dual_start_value\nAdded preliminary support for Complex coefficient types","category":"page"},{"location":"release_notes/#Fixed-41","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a bug in solution_summary","category":"page"},{"location":"release_notes/#Other-38","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"MILP examples have been migrated from GLPK to HiGHS\nFixed various typos\nImproved section on setting constraint start values","category":"page"},{"location":"release_notes/#Troubleshooting-problems-when-updating","page":"Release notes","title":"Troubleshooting problems when updating","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"If you experience problems when updating, you are likely using previously deprecated functionality. 
(By default, Julia does not warn when you use deprecated features.)","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"To find the deprecated features you are using, start Julia with --depwarn=yes:","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"$ julia --depwarn=yes","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Then install JuMP v0.22.3:","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"julia> using Pkg\njulia> pkg\"add JuMP@0.22.3\"","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"And then run your code. Apply any suggestions, or search the release notes below for advice on updating a specific deprecated feature.","category":"page"},{"location":"release_notes/#[Version-0.22.3](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.22.3)-(February-10,-2022)","page":"Release notes","title":"Version 0.22.3 (February 10, 2022)","text":"","category":"section"},{"location":"release_notes/#Fixed-42","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a reproducibility issue in the TSP tutorial\nFixed a reproducibility issue in the max_cut_sdp tutorial\nFixed a bug broadcasting an empty SparseAxisArray","category":"page"},{"location":"release_notes/#Other-39","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added a warning and improved documentation for the modify-then-query case\nFixed a typo in the docstring of RotatedSecondOrderCone\nAdded Aqua.jl as a check for code health\nAdded introductions to each section of the tutorials\nImproved the column generation and Benders decomposition tutorials\nUpdated documentation to MOI v0.10.8\nUpdated JuliaFormatter to v0.22.2","category":"page"},{"location":"release_notes/#[Version-0.22.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.22.2)-(January-10,-2022)","page":"Release notes","title":"Version 0.22.2 (January 10, 2022)","text":"","category":"section"},{"location":"release_notes/#Added-27","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The function all_nl_constraints now returns all nonlinear constraints in a model\nstart_value and set_start_value can now be used to get and set the primal start for constraint references\nPlural macros now return a tuple containing the elements that were defined instead of nothing\nAnonymous variables are now printed as _[i] where i is the index of the variable instead of noname. 
Calling name(x) still returns \"\" so this is non-breaking.","category":"page"},{"location":"release_notes/#Fixed-43","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed handling of min and max in nonlinear expressions\nCartesianIndex is no longer allowed as a key for DenseAxisArrays.","category":"page"},{"location":"release_notes/#Other-40","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improved the performance of GenericAffExpr\nAdded a tutorial on the Travelling Salesperson Problem\nAdded a tutorial on querying the Hessian of a nonlinear program\nAdded documentation on using custom solver binaries.","category":"page"},{"location":"release_notes/#[Version-0.22.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.22.1)-(November-29,-2021)","page":"Release notes","title":"Version 0.22.1 (November 29, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-28","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Export OptimizationSense enum, with instances: MIN_SENSE, MAX_SENSE, and FEASIBILITY_SENSE\nAdd Base.isempty(::Model) to match Base.empty(::Model)","category":"page"},{"location":"release_notes/#Fixed-44","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix bug in container with tuples as indices\nFix bug in set_time_limit_sec","category":"page"},{"location":"release_notes/#Other-41","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Add tutorial \"Design patterns for larger models\"\nRemove release notes section from PDF\nGeneral edits of the documentation and error messages","category":"page"},{"location":"release_notes/#[Version-0.22.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.22.0)-(November-10,-2021)","page":"Release notes","title":"Version 0.22.0 (November 10, 2021)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"JuMP v0.22 is a breaking release","category":"page"},{"location":"release_notes/#Breaking-5","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"JuMP 0.22 contains a number of breaking changes. However, these should be invisible for the majority of users. You will mostly encounter these breaking changes if you: wrote a JuMP extension, accessed backend(model), or called @SDconstraint.","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The breaking changes are as follows:","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"MathOptInterface has been updated to v0.10.4. For users who have interacted with the MOI backend, this contains a large number of breaking changes. 
Read the MathOptInterface release notes for more details.\nThe bridge_constraints keyword argument to Model and set_optimizer has been renamed add_bridges to reflect that more things were bridged than just constraints.\nThe backend(model) field now contains a concrete instance of a MOI.Utilities.CachingOptimizer instead of one with an abstractly typed optimizer field. In most cases, this will lead to improved performance. However, calling set_optimizer after backend invalidates the old backend. For example:\nmodel = Model()\nb = backend(model)\nset_optimizer(model, GLPK.Optimizer)\n@variable(model, x)\n# b is not updated with `x`! Get a new b by calling `backend` again.\nnew_b = backend(model)\nAll usages of @SDconstraint are deprecated. The new syntax is @constraint(model, X >= Y, PSDCone()).\nCreating a DenseAxisArray with a Number as an axis will now display a warning. This catches a common error in which users write @variable(model, x[length(S)]) instead of @variable(model, x[1:length(S)]).\nThe caching_mode argument to Model, for example, Model(caching_mode = MOIU.MANUAL), has been removed. For more control over the optimizer, use direct_model instead.\nThe previously deprecated lp_objective_perturbation_range and lp_rhs_perturbation_range functions have been removed. Use lp_sensitivity_report instead.\nThe .m fields of NonlinearExpression and NonlinearParameter have been renamed to .model.\nInfinite variable bounds are now ignored. Thus, @variable(model, x <= Inf) will show has_upper_bound(x) == false. Previously, these bounds were passed through to the solvers which caused numerical issues for solvers expecting finite bounds.\nThe variable_type and constraint_type functions were removed. This should only affect users who previously wrote JuMP extensions. The functions can be deleted without consequence.\nThe internal functions moi_mode, moi_bridge_constraints, moi_add_constraint, and moi_add_to_function_constant are no longer exported.\nThe unused method Containers.generate_container has been deleted.\nThe Containers API has been refactored, and _build_ref_sets is now public as Containers.build_ref_sets.\nThe parse_constraint_ methods for extending @constraint at parse time have been refactored in a breaking way. Consult the Extensions documentation for more details and examples.","category":"page"},{"location":"release_notes/#Added-29","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The TerminationStatusCode and ResultStatusCode enums are now exported by JuMP. Prefer termination_status(model) == OPTIMAL instead of == MOI.OPTIMAL, although the MOI. prefix way still works.\nCopy an x::DenseAxisArray to an Array by calling Array(x).\nNonlinearExpression is now a subtype of AbstractJuMPScalar\nConstraints such as @constraint(model, x + 1 in MOI.Integer()) are now supported.\nprimal_feasibility_report now accepts a function as the first argument.\nScalar variables @variable(model, x[1:2] in MOI.Integer()) creates two variables, both of which are constrained to be in the set MOI.Integer.\nConic constraints can now be specified as inequalities under a different partial ordering. 
So @constraint(model, x - y in MOI.Nonnegatives()) can now be written as @constraint(model, x >= y, MOI.Nonnegatives()).\nNames are now set for vectorized constraints.","category":"page"},{"location":"release_notes/#Fixed-45","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a performance issue when show was called on a SparseAxisArray with a large number of elements.\nFixed a bug displaying barrier and simplex iterations in solution_summary.\nFixed a bug by implementing hash for DenseAxisArray and SparseAxisArray.\nNames are now only set if the solver supports them. Previously, this prevented solvers such as Ipopt from being used with direct_model.\nMutableArithmetics.Zero is converted into a 0.0 before being returned to the user. Previously, some calls to @expression would return the undocumented MutableArithmetics.Zero() object. One example is summing over an empty set @expression(model, sum(x[i] for i in 1:0)). You will now get 0.0 instead.\nAffExpr and QuadExpr can now be used with == 0 instead of iszero. This fixes a number of issues relating to Julia standard libraries such as LinearAlgebra and SparseArrays.\nFixed a bug when registering a user-defined function with splatting.","category":"page"},{"location":"release_notes/#Other-42","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The documentation is now available as a PDF.\nThe documentation now includes a full copy of the MathOptInterface documentation to make it easy to link concepts between the docs. (The MathOptInterface documentation has also been significantly improved.)\nThe documentation contains a large number of improvements and clarifications on a range of topics. 
Thanks to @sshin23, @DilumAluthge, and @jlwether.\nThe documentation is now built with Julia 1.6 instead of 1.0.\nVarious error messages have been improved to be more readable.","category":"page"},{"location":"release_notes/#[Version-0.21.10](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.21.10)-(September-4,-2021)","page":"Release notes","title":"Version 0.21.10 (September 4, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-30","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added add_NL_expression\nadd_NL_xxx functions now support AffExpr and QuadExpr as terms","category":"page"},{"location":"release_notes/#Fixed-46","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a bug in solution_summary\nFixed a bug in relax_integrality","category":"page"},{"location":"release_notes/#Other-43","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improved error message in lp_sensitivity_report","category":"page"},{"location":"release_notes/#[Version-0.21.9](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.21.9)-(August-1,-2021)","page":"Release notes","title":"Version 0.21.9 (August 1, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-31","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Containers now support arbitrary container types by passing the type to the container keyword and overloading Containers.container.\nis_valid now supports nonlinear constraints\nAdded unsafe_backend for querying the inner-most optimizer of a JuMP model.\nNonlinear parameters now support the plural @NLparameters macro.\nContainers (for example, DenseAxisArray) can now be used in vector-valued constraints.","category":"page"},{"location":"release_notes/#Other-44","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Various improvements to the documentation.","category":"page"},{"location":"release_notes/#[Version-0.21.8](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.21.8)-(May-8,-2021)","page":"Release notes","title":"Version 0.21.8 (May 8, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-32","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The @constraint macro is now extendable in the same way as @variable.\nAffExpr and QuadExpr can now be used in nonlinear macros.","category":"page"},{"location":"release_notes/#Fixed-47","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a bug in lp_sensitivity_report.\nFixed an inference issue when creating empty SparseAxisArrays.","category":"page"},{"location":"release_notes/#[Version-0.21.7](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.21.7)-(April-12,-2021)","page":"Release notes","title":"Version 0.21.7 (April 12, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-33","page":"Release 
notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added primal_feasibility_report, which can be used to check whether a primal point satisfies primal feasibility.\nAdded coefficient, which returns the coefficient associated with a variable in affine and quadratic expressions.\nAdded copy_conflict, which returns the IIS of an infeasible model.\nAdded solution_summary, which returns (and prints) a struct containing a summary of the solution.\nAllow AbstractVector in vector constraints instead of just Vector.\nAdded latex_formulation(model) which returns an object representing the latex formulation of a model. Use print(latex_formulation(model)) to print the formulation as a string.\nUser-defined functions in nonlinear expressions are now automatically registered to aid quick model prototyping. However, a warning is printed to encourage the manual registration.\nDenseAxisArray's now support broadcasting over multiple arrays.\nContainer indices can now be iterators of Base.SizeUnknown.","category":"page"},{"location":"release_notes/#Fixed-48","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed bug in rad2deg and deg2rad in nonlinear expressions.\nFixed a MethodError bug in Containers when forcing container type.\nAllow partial slicing of a DenseAxisArray, resolving an issue from 2014.\nFixed a bug printing variable names in IJulia.\nEnding an IJulia cell with model now prints a summary of the model (like in the REPL) not the latex formulation. Use print(model) to print the latex formulation.\nFixed a bug when copying models containing nested arrays.","category":"page"},{"location":"release_notes/#Other-45","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Tutorials are now part of the documentation, and more refactoring has taken place.\nAdded JuliaFormatter added as a code formatter.\nAdded some precompilation statements to reduce initial latency.\nVarious improvements to error messages to make them more helpful.\nImproved performance of value(::NonlinearExpression).\nImproved performance of fix(::VariableRef).","category":"page"},{"location":"release_notes/#[Version-0.21.6](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.21.6)-(January-29,-2021)","page":"Release notes","title":"Version 0.21.6 (January 29, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-34","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added support for skew symmetric variables via @variable(model, X[1:2, 1:2] in SkewSymmetricMatrixSpace()).\nlp_sensitivity_report has been added which significantly improves the performance of querying the sensitivity summary of an LP. 
lp_objective_perturbation_range and lp_rhs_perturbation_range are deprecated.\nDual warm-starts are now supported with set_dual_start_value and dual_start_value.\n∈ (\\in) can now be used in macros instead of = or in.\nUse haskey(model::Model, key::Symbol) to check if a name key is registered in a model.\nAdded unregister(model::Model, key::Symbol) to unregister a name key from model.\nAdded callback_node_status for use in callbacks.\nAdded print_bridge_graph to visualize the bridging graph generated by MathOptInterface.\nImproved error message for containers with duplicate indices.","category":"page"},{"location":"release_notes/#Fixed-49","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Various fixes to pass tests on Julia 1.6.\nFixed a bug in the printing of nonlinear expressions in IJulia.\nFixed a bug when nonlinear expressions are passed to user-defined functions.\nSome internal functions that were previously exported are now no longer exported.\nFixed a bug when relaxing a fixed binary variable.\nFixed a StackOverflowError that occurred when SparseAxisArrays had a large number of elements.\nRemoved an unnecessary type assertion in list_of_constraint_types.\nFixed a bug when copying models with registered expressions.","category":"page"},{"location":"release_notes/#Other-46","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The documentation has been significantly overhauled. It now has distinct sections for the manual, API reference, and examples. The existing examples in /examples have now been moved to /docs/src/examples and rewritten using Literate.jl, and they are now included in the documentation.\nJuliaFormatter has been applied to most of the codebase. 
This will continue to roll out over time, as we fix upstream issues in the formatter, and will eventually become compulsory.\nThe root cause of a large number of method invalidations has been resolved.\nWe switched continuous integration from Travis and Appveyor to GitHub Actions.","category":"page"},{"location":"release_notes/#[Version-0.21.5](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.21.5)-(September-18,-2020)","page":"Release notes","title":"Version 0.21.5 (September 18, 2020)","text":"","category":"section"},{"location":"release_notes/#Fixed-50","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed deprecation warnings\nThrow DimensionMismatch for incompatibly sized functions and sets\nUnify treatment of keys(x) on JuMP containers","category":"page"},{"location":"release_notes/#[Version-0.21.4](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.21.4)-(September-14,-2020)","page":"Release notes","title":"Version 0.21.4 (September 14, 2020)","text":"","category":"section"},{"location":"release_notes/#Added-35","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Add debug info when adding unsupported constraints\nAdd relax_integrality for solving continuous relaxation\nAllow querying constraint conflicts","category":"page"},{"location":"release_notes/#Fixed-51","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Dispatch on Real for MOI.submit\nImplement copy for CustomSet in tests\nDon't export private macros\nFix invalid assertion in nonlinear\nError if constraint has NaN right-hand side\nImprove speed of tests\nLots of work modularizing files in /test\nImprove line numbers in macro error messages\nPrint nonlinear subexpressions\nVarious documentation updates\nDependency updates:\nDatastructures 0.18\nMathOptFormat v0.5\nPrep for MathOptInterface 0.9.15","category":"page"},{"location":"release_notes/#[Version-0.21.3](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.21.3)-(June-18,-2020)","page":"Release notes","title":"Version 0.21.3 (June 18, 2020)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added Special Order Sets (SOS1 and SOS2) to JuMP with default weights to ease the creation of such constraints (#2212).\nAdded functions simplex_iterations, barrier_iterations and node_count (#2201).\nAdded function reduced_cost (#2205).\nImplemented callback_value for affine and quadratic expressions (#2231).\nSupport MutableArithmetics.Zero in objective and constraints (#2219).\nDocumentation improvements:\nMention tutorials in the docs (#2223).\nUpdate COIN-OR links (#2242).\nExplicit link to the documentation of MOI.FileFormats (#2253).\nTypo fixes (#2261).\nContainers improvements:\nFix Base.map for DenseAxisArray (#2235).\nThrow BoundsError if number of indices is incorrect for DenseAxisArray and SparseAxisArray (#2240).\nExtensibility improvements:\nImplement a set_objective method fallback that redirects to set_objective_sense and set_objective_function (#2247).\nAdd parse_constraint method with arbitrary number of arguments (#2051).\nAdd parse_constraint_expr and parse_constraint_head 
(#2228).","category":"page"},{"location":"release_notes/#[Version-0.21.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.21.2)-(April-2,-2020)","page":"Release notes","title":"Version 0.21.2 (April 2, 2020)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added relative_gap() to access MOI.RelativeGap() attribute (#2199).\nDocumentation fixes:\nAdded link to source for docstrings in the documentation (#2207).\nAdded docstring for @variables macro (#2216).\nTypo fixes (#2177, #2184, #2182).\nImplementation of methods for Base functions:\nImplemented Base.empty! for JuMP.Model (#2198).\nImplemented Base.conj for JuMP scalar types (#2209).","category":"page"},{"location":"release_notes/#Fixed-52","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed sum of expression with scalar product in macro (#2178).\nFixed writing of nonlinear models to MathOptFormat (#2181).\nFixed construction of empty SparseAxisArray (#2179).\nFixed constraint with zero function (#2188).","category":"page"},{"location":"release_notes/#[Version-0.21.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.21.1)-(Feb-18,-2020)","page":"Release notes","title":"Version 0.21.1 (Feb 18, 2020)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improved the clarity of the with_optimizer deprecation warning.","category":"page"},{"location":"release_notes/#[Version-0.21.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.21.0)-(Feb-16,-2020)","page":"Release notes","title":"Version 0.21.0 (Feb 16, 2020)","text":"","category":"section"},{"location":"release_notes/#Breaking-6","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Deprecated with_optimizer (#2090, #2084, #2141). You can replace with_optimizer by either nothing, optimizer_with_attributes or a closure:\nreplace with_optimizer(Ipopt.Optimizer) by Ipopt.Optimizer.\nreplace with_optimizer(Ipopt.Optimizer, max_cpu_time=60.0) by optimizer_with_attributes(Ipopt.Optimizer, \"max_cpu_time\" => 60.0).\nreplace with_optimizer(Gurobi.Optimizer, env) by () -> Gurobi.Optimizer(env).\nreplace with_optimizer(Gurobi.Optimizer, env, Presolve=0) by optimizer_with_attributes(() -> Gurobi.Optimizer(env), \"Presolve\" => 0).\nalternatively to optimizer_with_attributes, you can also set the attributes separately with set_optimizer_attribute.\nRenamed set_parameter and set_parameters to set_optimizer_attribute and set_optimizer_attributes (#2150).\nBroadcast should now be explicit inside macros. @SDconstraint(model, x >= 1) and @constraint(model, x + 1 in SecondOrderCone()) now throw an error instead of broadcasting 1 along the dimension of x (#2107).\n@SDconstraint(model, x >= 0) is now equivalent to @constraint(model, x in PSDCone()) instead of @constraint(model, (x .- 0) in PSDCone()) (#2107).\nThe macros now create the containers with map instead of for loops, as a consequence, containers created by @expression can now have any element type and containers of constraint references now have concrete element types when possible. This fixes a long-standing issue where @expression could only be used to generate a collection of linear expressions. 
Now it works for quadratic expressions as well (#2070).\nCalling deepcopy(::AbstractModel) now throws an error.\nThe constraint name is now printed in the model string (#2108).","category":"page"},{"location":"release_notes/#Added-36","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added support for solver-independent and solver-specific callbacks (#2101).\nAdded write_to_file and read_from_file; supported formats are CBF, LP, MathOptFormat, MPS, and SDPA (#2114).\nAdded support for complementarity constraints (#2132).\nAdded support for indicator constraints (#2092).\nAdded support for querying multiple solutions with the result keyword (#2100).\nAdded support for constraining variables on creation (#2128).\nAdded a delete method that deletes a vector of variables at once if it is supported by the underlying solver (#2135).\nThe arithmetic between JuMP expressions has been refactored into the MutableArithmetics package (#2107).\nImproved error on complex values in NLP (#1978).\nAdded an example of column generation (#2010).","category":"page"},{"location":"release_notes/#Fixed-53","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Incorrect coefficients generated when using Symmetric variables (#2102)","category":"page"},{"location":"release_notes/#[Version-0.20.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.20.1)-(Oct-18,-2019)","page":"Release notes","title":"Version 0.20.1 (Oct 18, 2019)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Add sections on @variables and @constraints in the documentation (#2062).\nFixed product of sparse matrices for Julia v1.3 (#2063).\nAdded set_objective_coefficient to modify the coefficient of a linear term of the objective function (#2008).\nAdded set_time_limit_sec, unset_time_limit_sec and time_limit_sec to set and query the time limit for the solver in seconds (#2053).","category":"page"},{"location":"release_notes/#[Version-0.20.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.20.0)-(Aug-24,-2019)","page":"Release notes","title":"Version 0.20.0 (Aug 24, 2019)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Documentation updates.\nNumerous bug fixes.\nBetter error messages (#1977, #1978, #1997, #2017).\nPerformance improvements (#1947, #2032).\nAdded LP sensitivity summary functions lp_objective_perturbation_range and lp_rhs_perturbation_range (#1917).\nAdded functions dual_objective_value, raw_status and set_parameter.\nAdded function set_objective_coefficient to modify the coefficient of a linear term of the objective (#2008).\nAdded functions set_normalized_rhs, normalized_rhs, and add_to_function_constant to modify and get the constant part of a constraint (#1935, #1960).\nAdded functions set_normalized_coefficient and normalized_coefficient to modify and get the coefficient of a linear term of a constraint (#1935, #1960).\nNumerous other improvements in MOI 0.9; see the NEWS.md file of MOI for more details.","category":"page"},{"location":"release_notes/#[Version-0.19.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.19.2)-(June-8,-2019)","page":"Release notes","title":"Version 0.19.2 (June 8, 2019)","text":"","category":"section"},{"location":"release_notes/","page":"Release 
notes","title":"Release notes","text":"Fix a bug in derivatives that could arise in models with nested nonlinear subexpressions.","category":"page"},{"location":"release_notes/#[Version-0.19.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.19.1)-(May-12,-2019)","page":"Release notes","title":"Version 0.19.1 (May 12, 2019)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Usability and performance improvements.\nBug fixes.","category":"page"},{"location":"release_notes/#[Version-0.19.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.19.0)-(February-15,-2019)","page":"Release notes","title":"Version 0.19.0 (February 15, 2019)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"JuMP 0.19 contains significant breaking changes.","category":"page"},{"location":"release_notes/#Breaking-7","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"JuMP's abstraction layer for communicating with solvers changed from MathProgBase (MPB) to MathOptInterface (MOI). MOI addresses many longstanding design issues. (See @mlubin's slides from JuMP-dev 2018.) JuMP 0.19 is compatible only with solvers that have been updated for MOI. See the installation guide for a list of solvers that have and have not yet been updated.\nMost solvers have been renamed to PackageName.Optimizer. For example, GurobiSolver() is now Gurobi.Optimizer.\nSolvers are no longer added to a model via Model(solver = XXX(kwargs...)). Instead use Model(with_optimizer(XXX, kwargs...)). For example, Model(with_optimizer(Gurobi.Optimizer, OutputFlag=0)).\nJuMP containers (for example, the objects returned by @variable) have been redesigned. Containers.SparseAxisArray replaces JuMPDict, JuMPArray was rewritten (inspired by AxisArrays) and renamed Containers.DenseAxisArray, and you can now request a container type with the container= keyword to the macros. See the corresponding documentation for more details.\nThe statuses returned by solvers have changed. See the possible status values here. The MOI statuses are much richer than the MPB statuses and can be used to distinguish between previously indistinguishable cases (for example, did the solver have a feasible solution when it stopped because of the time limit?).\nStarting values are separate from result values. Use value to query the value of a variable in a solution. Use start_value and set_start_value to get and set an initial starting point provided to the solver. The solutions from previous solves are no longer automatically set as the starting points for the next solve.\nThe data structures for affine and quadratic expressions AffExpr and QuadExpr have changed. Internally, terms are stored in dictionaries instead of lists. Duplicate coefficients can no longer exist. Accessors and iteration methods have changed.\nJuMPNLPEvaluator no longer includes the linear and quadratic parts of the model in the evaluation calls. These are now handled separately to allow NLP solvers that support various types of constraints.\nJuMP solver-independent callbacks have been replaced by solver-specific callbacks. See your favorite solver for more details. (See the note below: No solver-specific callbacks are implemented yet.)\nThe norm() syntax is no longer recognized inside macros. 
Use the SecondOrderCone() set instead.\nJuMP no longer performs automatic transformation between special quadratic forms and second-order cone constraints. Support for these constraint classes depends on the solver.\nThe symbols :Min and :Max are no longer used as optimization senses. Instead, JuMP uses the OptimizationSense enum from MathOptInterface. @objective(model, Max, ...), @objective(model, Min, ...), @NLobjective(model, Max, ...), and @objective(model, Min, ...) remain valid, but @objective(m, :Max, ...) is no longer accepted.\nThe sign conventions for duals has changed in some cases for consistency with conic duality (see the documentation). The shadow_price helper method returns duals with signs that match conventional LP interpretations of dual values as sensitivities of the objective value to relaxations of constraints.\n@constraintref is no longer defined. Instead, create the appropriate container to hold constraint references manually. For example,\nconstraints = Dict() # Optionally, specify types for improved performance.\nfor i in 1:N\n constraints[i] = @constraint(model, ...)\nend\nThe lowerbound, upperbound, and basename keyword arguments to the @variable macro have been renamed to lower_bound, upper_bound, and base_name, for consistency with JuMP's new style recommendations.\nWe rely on broadcasting syntax to apply accessors to collections of variables, for example, value.(x) instead of getvalue(x) for collections. (Use value(x) when x is a scalar object.)","category":"page"},{"location":"release_notes/#Added-37","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Splatting (like f(x...)) is recognized in restricted settings in nonlinear expressions.\nSupport for deleting constraints and variables.\nThe documentation has been completely rewritten using docstrings and Documenter.\nSupport for modeling mixed conic and quadratic models (for example, conic models with quadratic objectives and bi-linear matrix inequalities).\nSignificantly improved support for modeling new types of constraints and for extending JuMP's macros.\nSupport for providing dual warm starts.\nImproved support for accessing solver-specific attributes (for example, the irreducible inconsistent subsystem).\nExplicit control of whether symmetry-enforcing constraints are added to PSD constraints.\nSupport for modeling exponential cones.\nSignificant improvements in internal code quality and testing.\nStyle and naming guidelines.\nDirect mode and manual mode provide explicit control over when copies of a model are stored or regenerated. See the corresponding documentation.","category":"page"},{"location":"release_notes/#Regressions","page":"Release notes","title":"Regressions","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"There are known regressions from JuMP 0.18 that will be addressed in a future release (0.19.x or later):","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Performance regressions in model generation (issue). Please file an issue anyway if you notice a significant performance regression. 
We have plans to address a number of performance issues, but we might not be aware of all of them.\nFast incremental NLP solves are not yet reimplemented (issue).\nWe do not yet have an implementation of solver-specific callbacks.\nThe column generation syntax in @variable has been removed (that is, the objective, coefficients, and inconstraints keyword arguments). Support for column generation will be re-introduced in a future release.\nThe ability to solve the continuous relaxation (that is, via solve(model; relaxation = true)) is not yet reimplemented (issue).","category":"page"},{"location":"release_notes/#[Version-0.18.5](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.18.5)-(December-1,-2018)","page":"Release notes","title":"Version 0.18.5 (December 1, 2018)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Support views in some derivative evaluation functions.\nImproved compatibility with PackageCompiler.","category":"page"},{"location":"release_notes/#[Version-0.18.4](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.18.4)-(October-8,-2018)","page":"Release notes","title":"Version 0.18.4 (October 8, 2018)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix a bug in model printing on Julia 0.7 and 1.0.","category":"page"},{"location":"release_notes/#[Version-0.18.3](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.18.3)-(October-1,-2018)","page":"Release notes","title":"Version 0.18.3 (October 1, 2018)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Add support for Julia v1.0 (Thanks @ExpandingMan)\nFix matrix expressions with quadratic functions (#1508)","category":"page"},{"location":"release_notes/#[Version-0.18.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.18.2)-(June-10,-2018)","page":"Release notes","title":"Version 0.18.2 (June 10, 2018)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix a bug in second-order derivatives when expressions are present (#1319)\nFix a bug in @constraintref (#1330)","category":"page"},{"location":"release_notes/#[Version-0.18.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.18.1)-(April-9,-2018)","page":"Release notes","title":"Version 0.18.1 (April 9, 2018)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix for nested tuple destructuring (#1193)\nPreserve internal model when relaxation=true (#1209)\nMinor bug fixes and updates for example","category":"page"},{"location":"release_notes/#[Version-0.18.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.18.0)-(July-27,-2017)","page":"Release notes","title":"Version 0.18.0 (July 27, 2017)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Drop support for Julia 0.5.\nUpdate for ForwardDiff 0.5.\nMinor bug fixes.","category":"page"},{"location":"release_notes/#[Version-0.17.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.17.1)-(June-9,-2017)","page":"Release notes","title":"Version 0.17.1 (June 9, 2017)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Use of constructconstraint! 
in @SDconstraint.\nMinor bug fixes.","category":"page"},{"location":"release_notes/#[Version-0.17.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.17.0)-(May-27,-2017)","page":"Release notes","title":"Version 0.17.0 (May 27, 2017)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Breaking change: Mixing quadratic and conic constraints is no longer supported.\nBreaking change: The getvariable and getconstraint functions are replaced by indexing on the corresponding symbol. For instance, to access the variable with name x, one should now write m[:x] instead of getvariable(m, :x). As a consequence, creating a variable and constraint with the same name now triggers a warning, and accessing one of them afterwards throws an error. This change is breaking only in the latter case.\nAddition of the getobjectivebound function that mirrors the functionality of the MathProgBase getobjbound function except that it takes into account transformations performed by JuMP.\nMinor bug fixes.","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The following changes are primarily of interest to developers of JuMP extensions:","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The new syntax @constraint(model, expr in Cone) creates the constraint ensuring that expr is inside Cone. The Cone argument is passed to constructconstraint! which enables the call to be dispatched to an extension.\nThe @variable macro now calls constructvariable! instead of directly calling the Variable constructor. Extra arguments and keyword arguments passed to @variable are passed to constructvariable! which enables the call to be dispatched to an extension.\nRefactor the internal function conicdata (used to build the MathProgBase conic model) into smaller sub-functions to make these parts reusable by extensions.","category":"page"},{"location":"release_notes/#[Version-0.16.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.16.2)-(March-28,-2017)","page":"Release notes","title":"Version 0.16.2 (March 28, 2017)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Minor bug fixes and printing tweaks\nAddress deprecation warnings for Julia 0.6","category":"page"},{"location":"release_notes/#[Version-0.16.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.16.1)-(March-7,-2017)","page":"Release notes","title":"Version 0.16.1 (March 7, 2017)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Better support for AbstractArray in JuMP (Thanks @tkoolen)\nMinor bug fixes","category":"page"},{"location":"release_notes/#[Version-0.16.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.16.0)-(February-23,-2017)","page":"Release notes","title":"Version 0.16.0 (February 23, 2017)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Breaking change: JuMP no longer has a mechanism for selecting solvers by default (the previous mechanism was flawed and incompatible with Julia 0.6). Not specifying a solver before calling solve() will result in an error.\nBreaking change: User-defined functions are no longer global. The first argument to JuMP.register is now a JuMP Model object within whose scope the function will be registered. 
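A minimal sketch of the model-scoped form (the function name and arity are illustrative, and the call follows the documented pattern of that era, so treat it as an approximation):

```julia
using JuMP

my_square(x) = x^2

m = Model()
@variable(m, x >= 0.5)

# The model is now the first argument, so the registration is
# local to `m` rather than global.
JuMP.register(m, :my_square, 1, my_square, autodiff = true)

@NLobjective(m, Min, my_square(x))
```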
Calling JuMP.register without a Model now produces an error.\nBreaking change: Use the new JuMP.fix method to fix a variable to a value or to update the value to which a variable is fixed. Calling setvalue on a fixed variable now results in an error in order to avoid silent behavior changes. (Thanks @joaquimg)\nNonlinear expressions now print out similarly to linear/quadratic expressions (useful for debugging!)\nNew category keyword to @variable. Used for specifying categories of anonymous variables.\nCompatibility with Julia 0.6-dev.\nMinor fixes and improvements (Thanks @cossio, @ccoffrin, @blegat)","category":"page"},{"location":"release_notes/#[Version-0.15.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.15.1)-(January-31,-2017)","page":"Release notes","title":"Version 0.15.1 (January 31, 2017)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Bugfix for @LinearConstraints and friends","category":"page"},{"location":"release_notes/#[Version-0.15.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.15.0)-(December-22,-2016)","page":"Release notes","title":"Version 0.15.0 (December 22, 2016)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Julia 0.5.0 is the minimum required version for this release.\nDocument support for BARON solver\nEnable info callbacks in more states than before, for example, for recording solutions. New when argument to addinfocallback (#814, thanks @yeesian)\nImproved support for anonymous variables. This includes new warnings for potentially confusing use of the traditional non-anonymous syntax:\nWhen multiple variables in a model are given the same name\nWhen non-symbols are used as names, for example, @variable(m, x[1][1:N])\nImprovements in iterating over JuMP containers (#836, thanks @IssamT)\nSupport for writing variable names in .lp file output (Thanks @leethargo)\nSupport for querying duals to SDP problems (Thanks @blegat)\nThe comprehension syntax with curly braces sum{}, prod{}, and norm2{} has been deprecated in favor of Julia's native comprehension syntax sum(), prod() and norm() as previously announced. 
(For early adopters of the new syntax, norm2() was renamed to norm() without deprecation.)\nUnit tests rewritten to use Base.Test instead of FactCheck\nImproved support for operations with matrices of JuMP types (Thanks @ExpandingMan)\nThe syntax to halt a solver from inside a callback has changed from throw(CallbackAbort()) to return JuMP.StopTheSolver\nMinor bug fixes","category":"page"},{"location":"release_notes/#[Version-0.14.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.14.2)-(December-12,-2016)","page":"Release notes","title":"Version 0.14.2 (December 12, 2016)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Allow singleton anonymous variables (includes bugfix)","category":"page"},{"location":"release_notes/#[Version-0.14.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.14.1)-(September-12,-2016)","page":"Release notes","title":"Version 0.14.1 (September 12, 2016)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"More consistent handling of states in informational callbacks, includes a new when parameter to addinfocallback for specifying in which state an informational callback should be called.","category":"page"},{"location":"release_notes/#[Version-0.14.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.14.0)-(August-7,-2016)","page":"Release notes","title":"Version 0.14.0 (August 7, 2016)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Compatibility with Julia 0.5 and ForwardDiff 0.2\nSupport for \"anonymous\" variables, constraints, expressions, and parameters, for example, x = @variable(m, [1:N]) instead of @variable(m, x[1:N])\nSupport for retrieving constraints from a model by name via getconstraint\n@NLconstraint now returns constraint references (as expected).\nSupport for vectorized expressions within lazy constraints\nOn Julia 0.5, parse new comprehension syntax sum(x[i] for i in 1:N if isodd(i)) instead of sum{ x[i], i in 1:N; isodd(i) }. The old syntax with curly braces will be deprecated in JuMP 0.15.\nNow possible to provide nonlinear expressions as \"raw\" Julia Expr objects instead of using JuMP's nonlinear macros. 
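Roughly, that looks like the following (a hedged sketch of the raw-expression interface of that era; the model and names are illustrative):

```julia
using JuMP

m = Model()
@variable(m, x[1:2] >= 0)

# Build the objective as a plain Julia Expr, splicing in the JuMP
# variables, then pass it to the model without any macro.
expr = :( $(x[1])^2 + sin($(x[2])) )
JuMP.setNLobjective(m, :Min, expr)
```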
This input format is useful for programmatically generated expressions.\ns/Mathematical Programming/Mathematical Optimization/\nSupport for local cuts (Thanks to @madanim, Mehdi Madani)\nDocument Xpress interface developed by @joaquimg, Joaquim Dias Garcia\nMinor bug and deprecation fixes (Thanks @odow, @jrevels)","category":"page"},{"location":"release_notes/#[Version-0.13.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.13.2)-(May-16,-2016)","page":"Release notes","title":"Version 0.13.2 (May 16, 2016)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Compatibility update for MathProgBase","category":"page"},{"location":"release_notes/#[Version-0.13.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.13.1)-(May-3,-2016)","page":"Release notes","title":"Version 0.13.1 (May 3, 2016)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix broken deprecation for registerNLfunction.","category":"page"},{"location":"release_notes/#[Version-0.13.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.13.0)-(April-29,-2016)","page":"Release notes","title":"Version 0.13.0 (April 29, 2016)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Most exported methods and macros have been renamed to avoid camelCase. See the list of changes here. There is a 1-1 mapping from the old names to the new, and it is safe to simply replace the names to update existing models.\nSpecify variable lower/upper bounds in @variable using the lowerbound and upperbound keyword arguments.\nChange name printed for variable using the basename keyword argument to @variable.\nNew @variables macro allows multi-line declaration of groups of variables.\nA number of solver methods previously available only through MathProgBase are now exposed directly in JuMP. The fix was recorded live.\nCompatibility fixes with Julia 0.5.\nThe \"end\" indexing syntax is no longer supported within JuMPArrays which do not use 1-based indexing until upstream issues are resolved, see here.","category":"page"},{"location":"release_notes/#[Version-0.12.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.12.2)-(March-9,-2016)","page":"Release notes","title":"Version 0.12.2 (March 9, 2016)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Small fixes for nonlinear optimization","category":"page"},{"location":"release_notes/#[Version-0.12.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.12.1)-(March-1,-2016)","page":"Release notes","title":"Version 0.12.1 (March 1, 2016)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix a regression in slicing for JuMPArrays (when not using 1-based indexing)","category":"page"},{"location":"release_notes/#[Version-0.12.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.12.0)-(February-27,-2016)","page":"Release notes","title":"Version 0.12.0 (February 27, 2016)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The automatic differentiation functionality has been completely rewritten with a number of user-facing changes:\n@defExpr and @defNLExpr now take the model as the first argument. The previous one-argument version of @defExpr is deprecated; all expressions should be named. 
For example, replace @defExpr(2x+y) with @defExpr(jump_model, my_expr, 2x+y).\nJuMP no longer uses Julia's variable binding rules for efficiently re-solving a sequence of nonlinear models. Instead, we have introduced nonlinear parameters. This is a breaking change, so we have added a warning message when we detect models that may depend on the old behavior.\nSupport for user-defined functions integrated within nonlinear JuMP expressions.\nReplaced iteration over AffExpr with Number-like scalar iteration; previous iteration behavior is now available via linearterms(::AffExpr).\nStopping the solver via throw(CallbackAbort()) from a callback no longer triggers an exception. Instead, solve() returns UserLimit status.\ngetDual() now works for conic problems (Thanks @emreyamangil.)","category":"page"},{"location":"release_notes/#[Version-0.11.3](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.11.3)-(February-4,-2016)","page":"Release notes","title":"Version 0.11.3 (February 4, 2016)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Bug-fix for problems with quadratic objectives and semidefinite constraints","category":"page"},{"location":"release_notes/#[Version-0.11.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.11.2)-(January-14,-2016)","page":"Release notes","title":"Version 0.11.2 (January 14, 2016)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Compatibility update for Mosek","category":"page"},{"location":"release_notes/#[Version-0.11.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.11.1)-(December-1,-2015)","page":"Release notes","title":"Version 0.11.1 (December 1, 2015)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Remove usage of @compat in tests.\nFix updating quadratic objectives for nonlinear models.","category":"page"},{"location":"release_notes/#[Version-0.11.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.11.0)-(November-30,-2015)","page":"Release notes","title":"Version 0.11.0 (November 30, 2015)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Julia 0.4.0 is the minimum required version for this release.\nFix for scoping semantics of index variables in sum{}. 
Index variables no longer leak into the surrounding scope.\nAddition of the solve(m::Model, relaxation=true) keyword argument to solve the standard continuous relaxation of model m\nThe getConstraintBounds() method allows access to the lower and upper bounds of all constraints in a (nonlinear) model.\nUpdate for breaking changes in MathProgBase","category":"page"},{"location":"release_notes/#[Version-0.10.3](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.10.3)-(November-20,-2015)","page":"Release notes","title":"Version 0.10.3 (November 20, 2015)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix a rare error when parsing quadratic expressions\nFix Variable() constructor with default arguments\nDetect unrecognized keywords in solve()","category":"page"},{"location":"release_notes/#[Version-0.10.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.10.2)-(September-28,-2015)","page":"Release notes","title":"Version 0.10.2 (September 28, 2015)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix for deprecation warnings","category":"page"},{"location":"release_notes/#[Version-0.10.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.10.1)-(September-3,-2015)","page":"Release notes","title":"Version 0.10.1 (September 3, 2015)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixes for ambiguity warnings.\nFix for breaking change in precompilation syntax in Julia 0.4-pre","category":"page"},{"location":"release_notes/#[Version-0.10.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.10.0)-(August-31,-2015)","page":"Release notes","title":"Version 0.10.0 (August 31, 2015)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Support (on Julia 0.4 and later) for conditions in indexing @defVar and @addConstraint constructs, for example, @defVar(m, x[i=1:5,j=1:5; i+j >= 3])\nSupport for vectorized operations on Variables and expressions. See the documentation for details.\nNew getVar() method to access variables in a model by name\nSupport for semidefinite programming.\nDual solutions are now available for general nonlinear problems. 
You may call getDual on a reference object for a nonlinear constraint, and getDual on a variable object for Lagrange multipliers from active bounds.\nIntroduce warnings for two common performance traps: too many calls to getValue() on a collection of variables and use of the + operator in a loop to sum expressions.\nSecond-order cone constraints can be written directly with the norm() and norm2{} syntax.\nImplement MathProgBase interface for querying Hessian-vector products.\nIteration over JuMPContainers is deprecated; instead, use the keys and values functions, and zip(keys(d),values(d)) for the old behavior.\n@defVar returns Array{Variable,N} when each of N index sets are of the form 1:nᵢ.\nModule precompilation: on Julia 0.4 and later, using JuMP is now much faster.","category":"page"},{"location":"release_notes/#[Version-0.9.3](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.9.3)-(August-11,-2015)","page":"Release notes","title":"Version 0.9.3 (August 11, 2015)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixes for FactCheck testing on julia v0.4.","category":"page"},{"location":"release_notes/#[Version-0.9.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.9.2)-(June-27,-2015)","page":"Release notes","title":"Version 0.9.2 (June 27, 2015)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix bug in @addConstraints.","category":"page"},{"location":"release_notes/#[Version-0.9.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.9.1)-(April-25,-2015)","page":"Release notes","title":"Version 0.9.1 (April 25, 2015)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix for Julia 0.4-dev.\nSmall infrastructure improvements for extensions.","category":"page"},{"location":"release_notes/#[Version-0.9.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.9.0)-(April-18,-2015)","page":"Release notes","title":"Version 0.9.0 (April 18, 2015)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Comparison operators for constructing constraints (for example, 2x >= 1) have been deprecated. Instead, construct the constraints explicitly in the @addConstraint macro to add them to the model, or in the @LinearConstraint macro to create a stand-alone linear constraint instance.\ngetValue() method implemented to compute the value of a nonlinear subexpression\nJuMP is now released under the Mozilla Public License version 2.0 (was previously LGPL). MPL is a copyleft license which is less restrictive than LGPL, especially for embedding JuMP within other applications.\nA number of performance improvements in ReverseDiffSparse for computing derivatives.\nMathProgBase.getsolvetime(m) now returns the solution time reported by the solver, if available. (Thanks @odow, Oscar Dowson)\nFormatting fix for LP format output. 
(Thanks @sbebo, Leonardo Taccari).","category":"page"},{"location":"release_notes/#[Version-0.8.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.8.0)-(February-17,-2015)","page":"Release notes","title":"Version 0.8.0 (February 17, 2015)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Nonlinear subexpressions now supported with the @defNLExpr macro.\nSCS supported for solving second-order conic problems.\nsetXXXCallback family deprecated in favor of addXXXCallback.\nMultiple callbacks of the same type can be registered.\nAdded support for informational callbacks via addInfoCallback.\nA CallbackAbort exception can be thrown from callback to safely exit optimization.","category":"page"},{"location":"release_notes/#[Version-0.7.4](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.7.4)-(February-4,-2015)","page":"Release notes","title":"Version 0.7.4 (February 4, 2015)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Reduced costs and linear constraint duals are now accessible when quadratic constraints are present.\nTwo-sided nonlinear constraints are supported.\nMethods for accessing the number of variables and constraints in a model are renamed.\nNew default procedure for setting initial values in nonlinear optimization: project zero onto the variable bounds.\nSmall bug fixes.","category":"page"},{"location":"release_notes/#[Version-0.7.3](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.7.3)-(January-14,-2015)","page":"Release notes","title":"Version 0.7.3 (January 14, 2015)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix a method ambiguity conflict with Compose.jl (cosmetic fix)","category":"page"},{"location":"release_notes/#[Version-0.7.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.7.2)-(January-9,-2015)","page":"Release notes","title":"Version 0.7.2 (January 9, 2015)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix a bug in sum(::JuMPDict)\nAdded the setCategory function to change a variables category (for example, continuous or binary)","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"after construction, and getCategory to retrieve the variable category.","category":"page"},{"location":"release_notes/#[Version-0.7.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.7.1)-(January-2,-2015)","page":"Release notes","title":"Version 0.7.1 (January 2, 2015)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix a bug in parsing linear expressions in macros. 
Affects only Julia 0.4 and later.","category":"page"},{"location":"release_notes/#[Version-0.7.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.7.0)-(December-29,-2014)","page":"Release notes","title":"Version 0.7.0 (December 29, 2014)","text":"","category":"section"},{"location":"release_notes/#Linear/quadratic/conic-programming","page":"Release notes","title":"Linear/quadratic/conic programming","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Breaking change: The syntax for column-wise model generation has been changed to use keyword arguments in @defVar.\nOn Julia 0.4 and later, variables and coefficients may be multiplied in any order within macros. That is, variable*coefficient is now valid syntax.\nECOS supported for solving second-order conic problems.","category":"page"},{"location":"release_notes/#_nonlinear_programming_release_notes","page":"Release notes","title":"Nonlinear programming","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Support for skipping model generation when solving a sequence of nonlinear models with changing data.\nFix a memory leak when solving a sequence of nonlinear models.\nThe @addNLConstraint macro now supports the three-argument version to define sets of nonlinear constraints.\nKNITRO supported as a nonlinear solver.\nSpeed improvements for model generation.\nThe @addNLConstraints macro supports adding multiple (groups of) constraints at once. Syntax is similar to @addConstraints.\nDiscrete variables allowed in nonlinear problems for solvers which support them (currently only KNITRO).","category":"page"},{"location":"release_notes/#General","page":"Release notes","title":"General","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Starting values for variables may now be specified with @defVar(m, x, start=value).\nThe setSolver function allows users to change the solver subsequent to model creation.\nSupport for \"fixed\" variables via the @defVar(m, x == 1) syntax.\nUnit tests rewritten to use FactCheck.jl, improved testing across solvers.","category":"page"},{"location":"release_notes/#[Version-0.6.3](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.6.3)-(October-19,-2014)","page":"Release notes","title":"Version 0.6.3 (October 19, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix a bug in multiplying two AffExpr objects.","category":"page"},{"location":"release_notes/#[Version-0.6.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.6.2)-(October-11,-2014)","page":"Release notes","title":"Version 0.6.2 (October 11, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Further improvements and bug fixes for printing.\nFixed a bug in @defExpr.\nSupport for accessing expression graphs through the MathProgBase NLP interface.","category":"page"},{"location":"release_notes/#[Version-0.6.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.6.1)-(September-19,-2014)","page":"Release notes","title":"Version 0.6.1 (September 19, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improvements and bug fixes for 
printing.","category":"page"},{"location":"release_notes/#[Version-0.6.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.6.0)-(September-9,-2014)","page":"Release notes","title":"Version 0.6.0 (September 9, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Julia 0.3.0 is the minimum required version for this release.\nbuildInternalModel(m::Model) added to build solver-level model in memory without optimizing.\nDeprecate load_model_only keyword argument to solve.\nAdd groups of constraints with @addConstraints macro.\nUnicode operators now supported, including ∑ for sum, ∏ for prod, and ≤/≥\nQuadratic constraints supported in @addConstraint macro.\nQuadratic objectives supported in @setObjective macro.\nMathProgBase solver-independent interface replaces Ipopt-specific interface for nonlinear problems\nBreaking change: IpoptOptions no longer supported to specify solver options, use m = Model(solver=IpoptSolver(options...)) instead.\nNew solver interfaces: ECOS, NLopt, and nonlinear support for MOSEK\nNew option to control whether the lazy constraint callback is executed at each node in the B&B tree or just when feasible solutions are found\nAdd support for semicontinuous and semi-integer variables for those solvers that support them.\nAdd support for index dependencies (for example, triangular indexing) in @defVar, @addConstraint, and @defExpr (for example, @defVar(m, x[i=1:10,j=i:10])).\nThis required some changes to the internal structure of JuMP containers, which may break code that explicitly stored JuMPDict objects.","category":"page"},{"location":"release_notes/#[Version-0.5.8](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.5.8)-(September-24,-2014)","page":"Release notes","title":"Version 0.5.8 (September 24, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix a bug with specifying solvers (affects Julia 0.2 only)","category":"page"},{"location":"release_notes/#[Version-0.5.7](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.5.7)-(September-5,-2014)","page":"Release notes","title":"Version 0.5.7 (September 5, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix a bug in printing models","category":"page"},{"location":"release_notes/#[Version-0.5.6](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.5.6)-(September-2,-2014)","page":"Release notes","title":"Version 0.5.6 (September 2, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Add support for semicontinuous and semi-integer variables for those solvers that support them.\nBreaking change: Syntax for Variable() constructor has changed (use of this interface remains discouraged)\nUpdate for breaking changes in MathProgBase","category":"page"},{"location":"release_notes/#[Version-0.5.5](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.5.5)-(July-6,-2014)","page":"Release notes","title":"Version 0.5.5 (July 6, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix bug with problem modification: adding variables that did not appear in existing constraints or objective.","category":"page"},{"location":"release_notes/#[Version-0.5.4](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.5.4)-(June-19,-2014)","page":"Release notes","title":"Version 0.5.4 (June 19, 
2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Update for breaking change in MathProgBase which reduces loading times for using JuMP\nFix error when MIPs not solved to optimality","category":"page"},{"location":"release_notes/#[Version-0.5.3](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.5.3)-(May-21,-2014)","page":"Release notes","title":"Version 0.5.3 (May 21, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Update for breaking change in ReverseDiffSparse","category":"page"},{"location":"release_notes/#[Version-0.5.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.5.2)-(May-9,-2014)","page":"Release notes","title":"Version 0.5.2 (May 9, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix compatibility with Julia 0.3 prerelease","category":"page"},{"location":"release_notes/#[Version-0.5.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.5.1)-(May-5,-2014)","page":"Release notes","title":"Version 0.5.1 (May 5, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix a bug in coefficient handling inside lazy constraints and user cuts","category":"page"},{"location":"release_notes/#[Version-0.5.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.5.0)-(May-2,-2014)","page":"Release notes","title":"Version 0.5.0 (May 2, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Support for nonlinear optimization with exact, sparse second-order derivatives automatically computed. Ipopt is currently the only solver supported.\ngetValue for AffExpr and QuadExpr\nBreaking change: getSolverModel replaced by getInternalModel, which returns the internal MathProgBase-level model\nGroups of constraints can be specified with @addConstraint (see documentation for details). This is not a breaking change.\ndot(::JuMPDict{Variable},::JuMPDict{Variable}) now returns the corresponding quadratic expression.","category":"page"},{"location":"release_notes/#[Version-0.4.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.4.1)-(March-24,-2014)","page":"Release notes","title":"Version 0.4.1 (March 24, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix bug where change in objective sense was ignored when re-solving a model.\nFix issue with handling zero coefficients in AffExpr.","category":"page"},{"location":"release_notes/#[Version-0.4.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.4.0)-(March-10,-2014)","page":"Release notes","title":"Version 0.4.0 (March 10, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Support for SOS1 and SOS2 constraints.\nSolver-independent callback for user heuristics.\ndot and sum implemented for JuMPDict objects. Now you can say @addConstraint(m, dot(a,x) <= b).\nDevelopers: support for extensions to JuMP. 
See definition of Model in src/JuMP.jl for more details.\nOption to construct the low-level model before optimizing.","category":"page"},{"location":"release_notes/#[Version-0.3.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.3.2)-(February-17,-2014)","page":"Release notes","title":"Version 0.3.2 (February 17, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improved model printing\nPreliminary support for IJulia output","category":"page"},{"location":"release_notes/#[Version-0.3.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.3.1)-(January-30,-2014)","page":"Release notes","title":"Version 0.3.1 (January 30, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Documentation updates\nSupport for MOSEK\nCPLEXLink renamed to CPLEX","category":"page"},{"location":"release_notes/#[Version-0.3.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.3.0)-(January-21,-2014)","page":"Release notes","title":"Version 0.3.0 (January 21, 2014)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Unbounded/infeasibility rays: getValue() will return the corresponding components of an unbounded ray when a model is unbounded, if supported by the selected solver. getDual() will return an infeasibility ray (Farkas proof) if a model is infeasible and the selected solver supports this feature.\nSolver-independent callbacks for user generated cuts.\nUse new interface for solver-independent QCQP.\nsetlazycallback renamed to setLazyCallback for consistency.","category":"page"},{"location":"release_notes/#[Version-0.2.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.2.0)-(December-15,-2013)","page":"Release notes","title":"Version 0.2.0 (December 15, 2013)","text":"","category":"section"},{"location":"release_notes/#Breaking-8","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Objective sense is specified in setObjective instead of in the Model constructor.\nlpsolver and mipsolver merged into single solver option.","category":"page"},{"location":"release_notes/#Added-38","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Problem modification with efficient LP restarts and MIP warm-starts.\nRelatedly, column-wise modeling now supported.\nSolver-independent callbacks supported. Currently we support only a \"lazy constraint\" callback, which works with Gurobi, CPLEX, and GLPK. 
More callbacks coming soon.","category":"page"},{"location":"release_notes/#[Version-0.1.2](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.1.2)-(November-16,-2013)","page":"Release notes","title":"Version 0.1.2 (November 16, 2013)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Bug fixes for printing, improved error messages.\nAllow AffExpr to be used in macros; for example, ex = y + z; @addConstraint(m, x + 2*ex <= 3)","category":"page"},{"location":"release_notes/#[Version-0.1.1](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.1.1)-(October-23,-2013)","page":"Release notes","title":"Version 0.1.1 (October 23, 2013)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Update for solver specification API changes in MathProgBase.","category":"page"},{"location":"release_notes/#[Version-0.1.0](https://github.com/jump-dev/JuMP.jl/releases/tag/v0.1.0)-(October-3,-2013)","page":"Release notes","title":"Version 0.1.0 (October 3, 2013)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Initial public release.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"CurrentModule = JuMP\nDocTestSetup = quote\n using JuMP\n import HiGHS\nend\nDocTestFilters = [r\"≤|<=\", r\"≥|>=\", r\" == | = \", r\" ∈ | in \", r\"MathOptInterface|MOI\"]","category":"page"},{"location":"manual/constraints/#jump_constraints","page":"Constraints","title":"Constraints","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"JuMP is based on the MathOptInterface (MOI) API. Because of this, JuMP uses the following standard form to represent problems:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"beginalign\n min_x in mathbbR^n f_0(x)\n \n textst f_i(x) in mathcalS_i i = 1 ldots m\nendalign","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Each constraint, f_i(x) in mathcalS_i, is composed of a function and a set. For example, instead of calling a^top x le b a less-than-or-equal-to constraint, we say that it is a scalar-affine-in-less-than constraint, where the function a^top x belongs to the less-than set (-infty b. We use the shorthand function-in-set to refer to constraints composed of different types of functions and sets.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"This page explains how to write various types of constraints in JuMP. For nonlinear constraints, see Nonlinear Modeling instead.","category":"page"},{"location":"manual/constraints/#Add-a-constraint","page":"Constraints","title":"Add a constraint","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Add a constraint to a JuMP model using the @constraint macro. 
The syntax to use depends on the type of constraint you wish to add.","category":"page"},{"location":"manual/constraints/#Add-a-linear-constraint","page":"Constraints","title":"Add a linear constraint","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Create linear constraints using the @constraint macro:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[1:3]);\n\njulia> @constraint(model, c1, sum(x) <= 1)\nc1 : x[1] + x[2] + x[3] ≤ 1\n\njulia> @constraint(model, c2, x[1] + 2 * x[3] >= 2)\nc2 : x[1] + 2 x[3] ≥ 2\n\njulia> @constraint(model, c3, sum(i * x[i] for i in 1:3) == 3)\nc3 : x[1] + 2 x[2] + 3 x[3] = 3\n\njulia> @constraint(model, c4, 4 <= 2 * x[2] <= 5)\nc4 : 2 x[2] ∈ [4, 5]","category":"page"},{"location":"manual/constraints/#Normalization","page":"Constraints","title":"Normalization","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"JuMP normalizes constraints by moving all of the terms containing variables to the left-hand side and all of the constant terms to the right-hand side. Thus, we get:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @constraint(model, c, 2x + 1 <= 4x + 4)\nc : -2 x ≤ 3","category":"page"},{"location":"manual/constraints/#quad_constraints","page":"Constraints","title":"Add a quadratic constraint","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"In addition to affine functions, JuMP also supports constraints with quadratic terms. For example:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[i=1:2])\n2-element Vector{VariableRef}:\n x[1]\n x[2]\n\njulia> @variable(model, t >= 0)\nt\n\njulia> @constraint(model, my_q, x[1]^2 + x[2]^2 <= t^2)\nmy_q : x[1]² + x[2]² - t² ≤ 0","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"tip: Tip\nBecause solvers can take advantage of the knowledge that a constraint is quadratic, prefer adding quadratic constraints using @constraint, rather than @NLconstraint.","category":"page"},{"location":"manual/constraints/#Vectorized-constraints","page":"Constraints","title":"Vectorized constraints","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"You can also add constraints to JuMP using vectorized linear algebra. 
For example:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[i=1:2])\n2-element Vector{VariableRef}:\n x[1]\n x[2]\n\njulia> A = [1 2; 3 4]\n2×2 Matrix{Int64}:\n 1 2\n 3 4\n\njulia> b = [5, 6]\n2-element Vector{Int64}:\n 5\n 6\n\njulia> @constraint(model, con_vector, A * x == b)\ncon_vector : [x[1] + 2 x[2] - 5, 3 x[1] + 4 x[2] - 6] ∈ Zeros()\n\njulia> @constraint(model, con_scalar, A * x .== b)\n2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.EqualTo{Float64}}, ScalarShape}}:\n con_scalar : x[1] + 2 x[2] = 5\n con_scalar : 3 x[1] + 4 x[2] = 6","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"The two constraints, == and .== are similar, but subtly different. The first creates a single constraint that is a MOI.VectorAffineFunction in MOI.Zeros constraint. The second creates a vector of MOI.ScalarAffineFunction in MOI.EqualTo constraints.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Which formulation to choose depends on the solver, and what you want to do with the constraint object con_vector or con_scalar.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"If you are using a conic solver, expect the dual of con_vector to be a Vector{Float64}, and do not intend to delete a row in the constraint, choose the == formulation.\nIf you are using a solver that expects a list of scalar constraints, for example HiGHS, or you wish to delete part of the constraint or access a single row of the constraint, for example, dual(con_scalar[2]), then use the broadcast .==.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"JuMP reformulates both constraints into the other form if needed by the solver, but choosing the right format for a particular solver is more efficient.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"You can also use <=, .<= , >=, and .>= as comparison operators in the constraint.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> @constraint(model, A * x <= b)\n[x[1] + 2 x[2] - 5, 3 x[1] + 4 x[2] - 6] ∈ Nonpositives()\n\njulia> @constraint(model, A * x .<= b)\n2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}}:\n x[1] + 2 x[2] ≤ 5\n 3 x[1] + 4 x[2] ≤ 6\n\njulia> @constraint(model, A * x >= b)\n[x[1] + 2 x[2] - 5, 3 x[1] + 4 x[2] - 6] ∈ Nonnegatives()\n\njulia> @constraint(model, A * x .>= b)\n2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.GreaterThan{Float64}}, ScalarShape}}:\n x[1] + 2 x[2] ≥ 5\n 3 x[1] + 4 x[2] ≥ 6","category":"page"},{"location":"manual/constraints/#Matrix-inequalities","page":"Constraints","title":"Matrix inequalities","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Inequalities between matrices are not supported, due to the common ambiguity between elementwise inequalities and a PSDCone 
constraint.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[1:2, 1:2], Symmetric);\n\njulia> @variable(model, y[1:2, 1:2], Symmetric);\n\njulia> @constraint(model, x >= y)\nERROR: At none:1: `@constraint(model, x >= y)`:\nThe syntax `x >= y` is ambiguous for matrices because we cannot tell if\nyou intend a positive semidefinite constraint or an elementwise\ninequality.\n\nTo create a positive semidefinite constraint, pass `PSDCone()` or\n`HermitianPSDCone()`:\n\n```julia\n@constraint(model, x >= y, PSDCone())\n```\n\nTo create an element-wise inequality, pass `Nonnegatives()`, or use\nbroadcasting:\n\n```julia\n@constraint(model, x >= y, Nonnegatives())\n# or\n@constraint(model, x .>= y)\n```\nStacktrace:\n[...]","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Instead, use the Set inequality syntax to specify a set like PSDCone or Nonnegatives:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> @constraint(model, x >= y, PSDCone())\n[x[1,1] - y[1,1] x[1,2] - y[1,2]\n ⋯ x[2,2] - y[2,2]] ∈ PSDCone()\n\njulia> @constraint(model, x >= y, Nonnegatives())\n[x[1,1] - y[1,1] x[1,2] - y[1,2]\n ⋯ x[2,2] - y[2,2]] ∈ Nonnegatives()\n\njulia> @constraint(model, x >= y, Nonpositives())\n[x[1,1] - y[1,1] x[1,2] - y[1,2]\n ⋯ x[2,2] - y[2,2]] ∈ Nonpositives()\n\njulia> @constraint(model, x >= y, Zeros())\n[x[1,1] - y[1,1] x[1,2] - y[1,2]\n ⋯ x[2,2] - y[2,2]] ∈ Zeros()","category":"page"},{"location":"manual/constraints/#Special-cases","page":"Constraints","title":"Special cases","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"There are two exceptions: if the result of the left-hand side minus the right-hand side is a LinearAlgebra.Symmetric matrix or a LinearAlgebra.Hermitian matrix, you may use the non-broadcasting equality syntax:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> using LinearAlgebra\n\njulia> model = Model();\n\njulia> @variable(model, X[1:2, 1:2], Symmetric)\n2×2 Symmetric{VariableRef, Matrix{VariableRef}}:\n X[1,1] X[1,2]\n X[1,2] X[2,2]\n\njulia> @constraint(model, X == LinearAlgebra.I)\n[X[1,1] - 1 X[1,2]\n ⋯ X[2,2] - 1] ∈ Zeros()","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"This will add only three rows to the constraint matrix because the symmetric constraints are redundant. 
In contrast, the broadcasting syntax adds four linear constraints:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> @constraint(model, X .== LinearAlgebra.I)\n2×2 Matrix{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.EqualTo{Float64}}, ScalarShape}}:\n X[1,1] = 1 X[1,2] = 0\n X[1,2] = 0 X[2,2] = 1","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"The same holds for LinearAlgebra.Hermitian matrices:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> using LinearAlgebra\n\njulia> model = Model();\n\njulia> @variable(model, X[1:2, 1:2] in HermitianPSDCone())\n2×2 Hermitian{GenericAffExpr{ComplexF64, VariableRef}, Matrix{GenericAffExpr{ComplexF64, VariableRef}}}:\n real(X[1,1]) real(X[1,2]) + imag(X[1,2]) im\n real(X[1,2]) - imag(X[1,2]) im real(X[2,2])\n\njulia> @constraint(model, X == LinearAlgebra.I)\n[real(X[1,1]) - 1 real(X[1,2]) + imag(X[1,2]) im\n real(X[1,2]) - imag(X[1,2]) im real(X[2,2]) - 1] ∈ Zeros()\n\njulia> @constraint(model, X .== LinearAlgebra.I)\n2×2 Matrix{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{ComplexF64}, MathOptInterface.EqualTo{ComplexF64}}, ScalarShape}}:\n real(X[1,1]) = 1 real(X[1,2]) + imag(X[1,2]) im = 0\n real(X[1,2]) - imag(X[1,2]) im = 0 real(X[2,2]) = 1","category":"page"},{"location":"manual/constraints/#Containers-of-constraints","page":"Constraints","title":"Containers of constraints","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"The @constraint macro supports creating collections of constraints. 
We'll cover some brief syntax here; read the Constraint containers section for more details:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Create arrays of constraints:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[1:3]);\n\njulia> @constraint(model, c[i=1:3], x[i] <= i^2)\n3-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}}:\n c[1] : x[1] ≤ 1\n c[2] : x[2] ≤ 4\n c[3] : x[3] ≤ 9\n\njulia> c[2]\nc[2] : x[2] ≤ 4","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Sets can be any Julia type that supports iteration:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[1:3]);\n\njulia> @constraint(model, c[i=2:3, [\"red\", \"blue\"]], x[i] <= i^2)\n2-dimensional DenseAxisArray{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape},2,...} with index sets:\n Dimension 1, 2:3\n Dimension 2, [\"red\", \"blue\"]\nAnd data, a 2×2 Matrix{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}}:\n c[2,red] : x[2] ≤ 4 c[2,blue] : x[2] ≤ 4\n c[3,red] : x[3] ≤ 9 c[3,blue] : x[3] ≤ 9\n\njulia> c[2, \"red\"]\nc[2,red] : x[2] ≤ 4","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Sets can depend upon previous indices:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[1:3]);\n\njulia> @constraint(model, c[i=1:3, j=i:3], x[i] <= j)\nJuMP.Containers.SparseAxisArray{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}, 2, Tuple{Int64, Int64}} with 6 entries:\n [1, 1] = c[1,1] : x[1] ≤ 1\n [1, 2] = c[1,2] : x[1] ≤ 2\n [1, 3] = c[1,3] : x[1] ≤ 3\n [2, 2] = c[2,2] : x[2] ≤ 2\n [2, 3] = c[2,3] : x[2] ≤ 3\n [3, 3] = c[3,3] : x[3] ≤ 3","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"and you can filter elements in the sets using the ; syntax:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[1:9]);\n\njulia> @constraint(model, c[i=1:9; mod(i, 3) == 0], x[i] <= i)\nJuMP.Containers.SparseAxisArray{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}, 1, Tuple{Int64}} with 3 entries:\n [3] = c[3] : x[3] ≤ 3\n [6] = c[6] : x[6] ≤ 6\n [9] = c[9] : x[9] ≤ 9","category":"page"},{"location":"manual/constraints/#Registered-constraints","page":"Constraints","title":"Registered constraints","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"When you create constraints, JuMP registers them inside the model using their corresponding symbol. 
Get a registered name using model[:key]:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model()\nA JuMP Model\n├ solver: none\n├ objective_sense: FEASIBILITY_SENSE\n├ num_variables: 0\n├ num_constraints: 0\n└ Names registered in the model: none\n\njulia> @variable(model, x)\nx\n\njulia> @constraint(model, my_c, 2x <= 1)\nmy_c : 2 x ≤ 1\n\njulia> model\nA JuMP Model\n├ solver: none\n├ objective_sense: FEASIBILITY_SENSE\n├ num_variables: 1\n├ num_constraints: 1\n│ └ AffExpr in MOI.LessThan{Float64}: 1\n└ Names registered in the model\n └ :my_c, :x\n\njulia> model[:my_c] === my_c\ntrue","category":"page"},{"location":"manual/constraints/#Anonymous-constraints","page":"Constraints","title":"Anonymous constraints","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"To reduce the likelihood of accidental bugs, and because JuMP registers constraints inside a model, creating two constraints with the same name is an error:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @constraint(model, c, 2x <= 1)\nc : 2 x ≤ 1\n\njulia> @constraint(model, c, 2x <= 1)\nERROR: An object of name c is already attached to this model. If this\n is intended, consider using the anonymous construction syntax, for example,\n `x = @variable(model, [1:N], ...)` where the name of the object does\n not appear inside the macro.\n\n Alternatively, use `unregister(model, :c)` to first unregister\n the existing name from the model. Note that this will not delete the\n object; it will just remove the reference at `model[:c]`.\n[...]","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"A common reason for encountering this error is adding constraints in a loop.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"As a work-around, JuMP provides anonymous constraints. 
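A rough sketch of the loop work-around, using the anonymous syntax introduced next (the model, variables, and the `cons` vector are assumptions made for illustration):

```julia
using JuMP

model = Model()
@variable(model, x[1:3])

# Naming the constraint inside the loop errors on the second iteration,
# because the symbol :c is registered on the first pass:
#     for i in 1:3
#         @constraint(model, c, x[i] <= i)   # ERROR when i == 2
#     end

# Anonymous constraints avoid the name clash; collect the references yourself:
cons = ConstraintRef[]
for i in 1:3
    push!(cons, @constraint(model, x[i] <= i))
end
```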
Create an anonymous constraint by omitting the name argument:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> c = @constraint(model, 2x <= 1)\n2 x ≤ 1","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Create a container of anonymous constraints by dropping the name in front of the [:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[1:3]);\n\njulia> c = @constraint(model, [i = 1:3], x[i] <= i)\n3-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}}:\n x[1] ≤ 1\n x[2] ≤ 2\n x[3] ≤ 3","category":"page"},{"location":"manual/constraints/#Constraint-names","page":"Constraints","title":"Constraint names","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"In addition to the symbol that constraints are registered with, constraints have a String name that is used for printing and writing to file formats.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Get and set the name of a constraint using name(::JuMP.ConstraintRef) and set_name(::JuMP.ConstraintRef, ::String):","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model(); @variable(model, x);\n\njulia> @constraint(model, con, x <= 1)\ncon : x ≤ 1\n\njulia> name(con)\n\"con\"\n\njulia> set_name(con, \"my_con_name\")\n\njulia> con\nmy_con_name : x ≤ 1","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Override the default choice of name using the base_name keyword:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model(); @variable(model, x);\n\njulia> con = @constraint(model, [i=1:2], x <= i, base_name = \"my_con\")\n2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}}:\n my_con[1] : x ≤ 1\n my_con[2] : x ≤ 2","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Note that names apply to each element of the container, not to the container of constraints:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> name(con[1])\n\"my_con[1]\"\n\njulia> set_name(con[1], \"c\")\n\njulia> con\n2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}}:\n c : x ≤ 1\n my_con[2] : x ≤ 2","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"tip: Tip\nFor some models, setting the string name of each constraint can take a non-trivial portion of the total time required to build the model. 
Turn off String names by passing set_string_name = false to @constraint:julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @constraint(model, con, x <= 2, set_string_name = false)\nx ≤ 2See Disable string names for more information.","category":"page"},{"location":"manual/constraints/#Retrieve-a-constraint-by-name","page":"Constraints","title":"Retrieve a constraint by name","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Retrieve a constraint from a model using constraint_by_name:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> constraint_by_name(model, \"c\")\nc : x ≤ 1","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"If the name is not present, nothing will be returned:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> constraint_by_name(model, \"bad_name\")","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"You can only look up individual constraints using constraint_by_name. Something like this will not work:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model(); @variable(model, x);\n\njulia> con = @constraint(model, [i=1:2], x <= i, base_name = \"my_con\")\n2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}}:\n my_con[1] : x ≤ 1\n my_con[2] : x ≤ 2\n\njulia> constraint_by_name(model, \"my_con\")","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"To look up a collection of constraints, do not use constraint_by_name. Instead, register them using the model[:key] = value syntax:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model(); @variable(model, x);\n\njulia> model[:con] = @constraint(model, [i=1:2], x <= i, base_name = \"my_con\")\n2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}}:\n my_con[1] : x ≤ 1\n my_con[2] : x ≤ 2\n\njulia> model[:con]\n2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}}:\n my_con[1] : x ≤ 1\n my_con[2] : x ≤ 2","category":"page"},{"location":"manual/constraints/#String-names,-symbolic-names,-and-bindings","page":"Constraints","title":"String names, symbolic names, and bindings","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"It's common for new users to experience confusion relating to constraints. Part of the problem is the difference between the name that a constraint is registered under and the String name used for printing.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Here's a summary of the differences:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Constraints are created using @constraint.\nConstraints can be named or anonymous.\nNamed constraints have the form @constraint(model, c, expr). 
For named constraints:\nThe String name of the constraint is set to \"c\".\nA Julia variable c is created that binds c to the JuMP constraint.\nThe name :c is registered as a key in the model with the value c.\nAnonymous constraints have the form c = @constraint(model, expr). For anonymous constraints:\nThe String name of the constraint is set to \"\".\nYou control the name of the Julia variable used as the binding.\nNo name is registered as a key in the model.\nThe base_name keyword can override the String name of the constraint.\nYou can manually register names in the model via model[:key] = value.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Here's an example of the differences:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> c_binding = @constraint(model, 2x <= 1, base_name = \"c\")\nc : 2 x ≤ 1\n\njulia> model\nA JuMP Model\n├ solver: none\n├ objective_sense: FEASIBILITY_SENSE\n├ num_variables: 1\n├ num_constraints: 1\n│ └ AffExpr in MOI.LessThan{Float64}: 1\n└ Names registered in the model\n └ :x\n\njulia> c\nERROR: UndefVarError: `c` not defined\n\njulia> c_binding\nc : 2 x ≤ 1\n\njulia> name(c_binding)\n\"c\"\n\njulia> model[:c_register] = c_binding\nc : 2 x ≤ 1\n\njulia> model\nA JuMP Model\n├ solver: none\n├ objective_sense: FEASIBILITY_SENSE\n├ num_variables: 1\n├ num_constraints: 1\n│ └ AffExpr in MOI.LessThan{Float64}: 1\n└ Names registered in the model\n └ :c_register, :x\n\njulia> model[:c_register]\nc : 2 x ≤ 1\n\njulia> model[:c_register] === c_binding\ntrue\n\njulia> c\nERROR: UndefVarError: `c` not defined","category":"page"},{"location":"manual/constraints/#The-@constraints-macro","page":"Constraints","title":"The @constraints macro","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"If you have many @constraint calls, use the @constraints macro to improve readability:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @constraints(model, begin\n 2x <= 1\n c, x >= -1\n end)\n(2 x ≤ 1, c : x ≥ -1)\n\njulia> print(model)\nFeasibility\nSubject to\n c : x ≥ -1\n 2 x ≤ 1","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"The @constraints macro returns a tuple of the constraints that were defined.","category":"page"},{"location":"manual/constraints/#constraint_duality","page":"Constraints","title":"Duality","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"JuMP adopts the notion of conic duality from MathOptInterface. For linear programs, a feasible dual on a >= constraint is nonnegative and a feasible dual on a <= constraint is nonpositive. If the constraint is an equality constraint, it depends on which direction is binding.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"warning: Warning\nJuMP's definition of duality is independent of the objective sense. That is, the sign of feasible duals associated with a constraint depends on the direction of the constraint and not whether the problem is maximization or minimization. This is a different convention from linear programming duality in some common textbooks. 
If you have a linear program, and you want the textbook definition, you probably want to use shadow_price and reduced_cost instead.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"The dual value associated with a constraint in the most recent solution can be accessed using the dual function. Use has_duals to check if the model has a dual solution available to query. For example:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model(HiGHS.Optimizer);\n\njulia> set_silent(model)\n\njulia> @variable(model, x)\nx\n\njulia> @constraint(model, con, x <= 1)\ncon : x ≤ 1\n\njulia> @objective(model, Min, -2x)\n-2 x\n\njulia> has_duals(model)\nfalse\n\njulia> optimize!(model)\n\njulia> has_duals(model)\ntrue\n\njulia> dual(con)\n-2.0\n\njulia> @objective(model, Max, 2x)\n2 x\n\njulia> optimize!(model)\n\njulia> dual(con)\n-2.0","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"To help users who may be less familiar with conic duality, JuMP provides shadow_price, which returns a value that can be interpreted as the improvement in the objective in response to an infinitesimal relaxation (on the scale of one unit) in the right-hand side of the constraint. shadow_price can be used only on linear constraints with a <=, >=, or == comparison operator.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"In the example above, dual(con) returned -2.0 regardless of the optimization sense. However, in the second case when the optimization sense is Max, shadow_price returns:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> shadow_price(con)\n2.0","category":"page"},{"location":"manual/constraints/#Duals-of-variable-bounds","page":"Constraints","title":"Duals of variable bounds","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"To query the dual variables associated with a variable bound, first obtain a constraint reference using one of UpperBoundRef, LowerBoundRef, or FixRef, and then call dual on the returned constraint reference. The reduced_cost function may simplify this process as it returns the shadow price of an active bound of a variable (or zero, if no active bound exists).","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model(HiGHS.Optimizer);\n\njulia> set_silent(model)\n\njulia> @variable(model, x <= 1)\nx\n\njulia> @objective(model, Min, -2x)\n-2 x\n\njulia> optimize!(model)\n\njulia> dual(UpperBoundRef(x))\n-2.0\n\njulia> reduced_cost(x)\n-2.0","category":"page"},{"location":"manual/constraints/#Modify-a-constant-term","page":"Constraints","title":"Modify a constant term","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"This section explains how to modify the constant term in a constraint. 
There are multiple ways to achieve this goal; we explain three options.","category":"page"},{"location":"manual/constraints/#Option-1:-change-the-right-hand-side","page":"Constraints","title":"Option 1: change the right-hand side","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Use set_normalized_rhs to modify the right-hand side (constant) term of a linear or quadratic constraint. Use normalized_rhs to query the right-hand side term.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @constraint(model, con, 2x <= 1)\ncon : 2 x ≤ 1\n\njulia> set_normalized_rhs(con, 3)\n\njulia> con\ncon : 2 x ≤ 3\n\njulia> normalized_rhs(con)\n3.0","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"warning: Warning\nset_normalized_rhs sets the right-hand side term of the normalized constraint. See Normalization for more details.","category":"page"},{"location":"manual/constraints/#Option-2:-use-fixed-variables","page":"Constraints","title":"Option 2: use fixed variables","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"If constraints are complicated, for example, they are composed of a number of components, each of which has a constant term, then it may be difficult to calculate what the right-hand side term is in the standard form.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"For this situation, JuMP includes the ability to fix variables to a value using the fix function. Fixing a variable sets its lower and upper bound to the same value. Thus, changes in a constant term can be simulated by adding a new variable and fixing it to different values. Here is an example:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @variable(model, const_term)\nconst_term\n\njulia> @constraint(model, con, 2x <= const_term + 1)\ncon : 2 x - const_term ≤ 1\n\njulia> fix(const_term, 1.0)","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"The constraint con is now equivalent to 2x <= 2.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"warning: Warning\nFixed variables are not replaced with constants when communicating the problem to a solver. Therefore, even though const_term is fixed, it is still a decision variable, and so const_term * x is bilinear.","category":"page"},{"location":"manual/constraints/#Option-3:-modify-the-function's-constant-term","page":"Constraints","title":"Option 3: modify the function's constant term","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"The third option is to use add_to_function_constant. The constant given is added to the function of a func-in-set constraint. 
In the following example, adding 2 to the function has the effect of removing 2 from the right-hand side:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @constraint(model, con, 2x <= 1)\ncon : 2 x ≤ 1\n\njulia> add_to_function_constant(con, 2)\n\njulia> con\ncon : 2 x ≤ -1\n\njulia> normalized_rhs(con)\n-1.0","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"In the case of interval constraints, the constant is removed from each bound:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @constraint(model, con, 0 <= 2x + 1 <= 2)\ncon : 2 x ∈ [-1, 1]\n\njulia> add_to_function_constant(con, 3)\n\njulia> con\ncon : 2 x ∈ [-4, -2]","category":"page"},{"location":"manual/constraints/#Modify-a-variable-coefficient","page":"Constraints","title":"Modify a variable coefficient","text":"","category":"section"},{"location":"manual/constraints/#Scalar-constraints","page":"Constraints","title":"Scalar constraints","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"To modify the coefficients for a linear term in a constraint, use set_normalized_coefficient. To query the current coefficient, use normalized_coefficient.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[1:2]);\n\njulia> @constraint(model, con, 2x[1] + x[2] <= 1)\ncon : 2 x[1] + x[2] ≤ 1\n\njulia> set_normalized_coefficient(con, x[2], 0)\n\njulia> con\ncon : 2 x[1] ≤ 1\n\njulia> normalized_coefficient(con, x[2])\n0.0","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"To modify quadratic terms, pass two variables:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[1:2]);\n\njulia> @constraint(model, con, x[1]^2 + x[1] * x[2] <= 1)\ncon : x[1]² + x[1]*x[2] ≤ 1\n\njulia> set_normalized_coefficient(con, x[1], x[1], 2)\n\njulia> set_normalized_coefficient(con, x[1], x[2], 3)\n\njulia> con\ncon : 2 x[1]² + 3 x[1]*x[2] ≤ 1\n\njulia> normalized_coefficient(con, x[1], x[1])\n2.0\n\njulia> normalized_coefficient(con, x[1], x[2])\n3.0","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"warning: Warning\nset_normalized_coefficient sets the coefficient of the normalized constraint. 
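For instance, here is a small sketch (the model and names are illustrative) showing that the change applies to the normalized form rather than to the expression as typed:

```julia
using JuMP

model = Model()
@variable(model, x)

# JuMP normalizes `2x + 1 <= 4x + 4` to `-2 x <= 3`.
@constraint(model, con, 2x + 1 <= 4x + 4)

# This replaces the normalized coefficient -2, so `con` becomes `5 x <= 3`;
# it does not edit the `2x` term that was originally written:
set_normalized_coefficient(con, x, 5.0)
```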
See Normalization for more details.","category":"page"},{"location":"manual/constraints/#Vector-constraints","page":"Constraints","title":"Vector constraints","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"To modify the coefficients of a vector-valued constraint, use set_normalized_coefficient.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @constraint(model, con, [2x + 3x, 4x] in MOI.Nonnegatives(2))\ncon : [5 x, 4 x] ∈ MathOptInterface.Nonnegatives(2)\n\njulia> set_normalized_coefficient(con, x, [(1, 3.0)])\n\njulia> con\ncon : [3 x, 4 x] ∈ MathOptInterface.Nonnegatives(2)\n\njulia> set_normalized_coefficient(con, x, [(1, 2.0), (2, 5.0)])\n\njulia> con\ncon : [2 x, 5 x] ∈ MathOptInterface.Nonnegatives(2)","category":"page"},{"location":"manual/constraints/#Delete-a-constraint","page":"Constraints","title":"Delete a constraint","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Use delete to delete a constraint from a model. Use is_valid to check if a constraint belongs to a model and has not been deleted.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @constraint(model, con, 2x <= 1)\ncon : 2 x ≤ 1\n\njulia> is_valid(model, con)\ntrue\n\njulia> delete(model, con)\n\njulia> is_valid(model, con)\nfalse","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Deleting a constraint does not unregister the symbolic reference from the model. Therefore, creating a new constraint of the same name will throw an error:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> @constraint(model, con, 2x <= 1)\nERROR: An object of name con is already attached to this model. If this\n is intended, consider using the anonymous construction syntax, for example,\n `x = @variable(model, [1:N], ...)` where the name of the object does\n not appear inside the macro.\n\n Alternatively, use `unregister(model, :con)` to first unregister\n the existing name from the model. Note that this will not delete the\n object; it will just remove the reference at `model[:con]`.\n[...]","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"After calling delete, call unregister to remove the symbolic reference:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> unregister(model, :con)\n\njulia> @constraint(model, con, 2x <= 1)\ncon : 2 x ≤ 1","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"info: Info\ndelete does not automatically unregister because we do not distinguish between names that are automatically registered by JuMP macros, and names that are manually registered by the user by setting values in object_dictionary. 
In addition, deleting a constraint and then adding a new constraint of the same name is an easy way to introduce bugs into your code.","category":"page"},{"location":"manual/constraints/#Start-values","page":"Constraints","title":"Start values","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Provide a starting value (also called warmstart) for a constraint's primal and dual solutions using set_start_value and set_dual_start_value.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Query the starting value for a constraint's primal and dual solution using start_value and dual_start_value. If no start value has been set, the methods will return nothing.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @constraint(model, con, x >= 10)\ncon : x ≥ 10\n\njulia> start_value(con)\n\njulia> set_start_value(con, 10.0)\n\njulia> start_value(con)\n10.0\n\njulia> dual_start_value(con)\n\njulia> set_dual_start_value(con, 2)\n\njulia> dual_start_value(con)\n2.0","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Vector-valued constraints require a vector:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[1:3])\n3-element Vector{VariableRef}:\n x[1]\n x[2]\n x[3]\n\njulia> @constraint(model, con, x in SecondOrderCone())\ncon : [x[1], x[2], x[3]] in MathOptInterface.SecondOrderCone(3)\n\njulia> dual_start_value(con)\n\njulia> set_dual_start_value(con, [1.0, 2.0, 3.0])\n\njulia> dual_start_value(con)\n3-element Vector{Float64}:\n 1.0\n 2.0\n 3.0","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"tip: Tip\nTo simplify setting start values for all variables and constraints in a model, see set_start_values. The Primal and dual warm-starts tutorial also gives a detailed description of how to iterate over constraints in the model to set custom start values.","category":"page"},{"location":"manual/constraints/#Constraint-containers","page":"Constraints","title":"Constraint containers","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Like Variable containers, JuMP provides a mechanism for building groups of constraints compactly. References to these groups of constraints are returned in containers. Three types of constraint containers are supported: Arrays, DenseAxisArrays, and SparseAxisArrays. 
We explain each of these in the following.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"tip: Tip\nYou can read more about containers in the Containers section.","category":"page"},{"location":"manual/constraints/#constraint_arrays","page":"Constraints","title":"Arrays","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"One way of adding a group of constraints compactly is the following:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @constraint(model, con[i = 1:3], i * x <= i + 1)\n3-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}}:\n con[1] : x ≤ 2\n con[2] : 2 x ≤ 3\n con[3] : 3 x ≤ 4","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"JuMP returns references to the three constraints in an Array that is bound to the Julia variable con. This array can be accessed and sliced as you would with any Julia array:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> con[1]\ncon[1] : x ≤ 2\n\njulia> con[2:3]\n2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}}:\n con[2] : 2 x ≤ 3\n con[3] : 3 x ≤ 4","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Anonymous containers can also be constructed by dropping the name (for example, con) before the square brackets:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> con = @constraint(model, [i = 1:2], i * x <= i + 1)\n2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}}:\n x ≤ 2\n 2 x ≤ 3","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Just like @variable, JuMP will form an Array of constraints when it can determine at parse time that the indices are one-based integer ranges. Therefore con[1:b] will create an Array, but con[a:b] will not. A special case is con[Base.OneTo(n)] which will produce an Array. 
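A small sketch of this parse-time distinction (the bindings `a`, `b`, and `n` are assumptions for illustration):

```julia
using JuMP

model = Model()
@variable(model, x)
a, b, n = 1, 3, 3

# A literal one-based range is detected at parse time and gives a Vector:
c1 = @constraint(model, [i = 1:3], i * x <= i + 1)

# Base.OneTo(n) is also recognized and gives a Vector:
c2 = @constraint(model, [i = Base.OneTo(n)], i * x <= i + 1)

# `a:b` is not known to be one-based at parse time, even though a == 1,
# so JuMP returns a DenseAxisArray indexed by 1:3 instead:
c3 = @constraint(model, [i = a:b], i * x <= i + 1)
```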
If JuMP cannot determine that the indices are one-based integer ranges (for example, in the case of con[a:b]), JuMP will create a DenseAxisArray instead.","category":"page"},{"location":"manual/constraints/#DenseAxisArrays","page":"Constraints","title":"DenseAxisArrays","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"The syntax for constructing a DenseAxisArray of constraints is very similar to the syntax for constructing a DenseAxisArray of variables.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @constraint(model, con[i = 1:2, j = 2:3], i * x <= j + 1)\n2-dimensional DenseAxisArray{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape},2,...} with index sets:\n Dimension 1, Base.OneTo(2)\n Dimension 2, 2:3\nAnd data, a 2×2 Matrix{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}}:\n con[1,2] : x ≤ 3 con[1,3] : x ≤ 4\n con[2,2] : 2 x ≤ 3 con[2,3] : 2 x ≤ 4","category":"page"},{"location":"manual/constraints/#SparseAxisArrays","page":"Constraints","title":"SparseAxisArrays","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"The syntax for constructing a SparseAxisArray of constraints is very similar to the syntax for constructing a SparseAxisArray of variables.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x);\n\njulia> @constraint(model, con[i = 1:2, j = 1:2; i != j], i * x <= j + 1)\nJuMP.Containers.SparseAxisArray{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.LessThan{Float64}}, ScalarShape}, 2, Tuple{Int64, Int64}} with 2 entries:\n [1, 2] = con[1,2] : x ≤ 3\n [2, 1] = con[2,1] : 2 x ≤ 2","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"warning: Warning\nIf you have many index dimensions and a large amount of sparsity, read Performance considerations.","category":"page"},{"location":"manual/constraints/#Forcing-the-container-type","page":"Constraints","title":"Forcing the container type","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"When creating a container of constraints, JuMP will attempt to choose the tightest container type that can store the constraints. However, because this happens at parse time, it does not always make the best choice. Just like in @variable, you can force the type of container using the container keyword. For syntax and the reason behind this, take a look at the variable docs.","category":"page"},{"location":"manual/constraints/#Constraints-with-similar-indices","page":"Constraints","title":"Constraints with similar indices","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Containers are often used to create constraints over a set of indices. 
However, you'll often have cases in which you are repeating the indices:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[1:2]);\n\njulia> @variable(model, y[1:2]);\n\njulia> @constraints(model, begin\n [i=1:2, j=1:2, k=1:2], i * x[j] <= k\n [i=1:2, j=1:2, k=1:2], i * y[j] <= k\n end);","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"This is hard to read and leads to a lot of copy-paste. A more readable way is to use a for-loop:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> for i=1:2, j=1:2, k=1:2\n @constraints(model, begin\n i * x[j] <= k\n i * y[j] <= k\n end)\n end","category":"page"},{"location":"manual/constraints/#Accessing-constraints-from-a-model","page":"Constraints","title":"Accessing constraints from a model","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Query the types of function-in-set constraints in a model using list_of_constraint_types:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[i=1:2] >= i, Int);\n\njulia> @constraint(model, x[1] + x[2] <= 1);\n\njulia> list_of_constraint_types(model)\n3-element Vector{Tuple{Type, Type}}:\n (AffExpr, MathOptInterface.LessThan{Float64})\n (VariableRef, MathOptInterface.GreaterThan{Float64})\n (VariableRef, MathOptInterface.Integer)","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"For a given combination of function and set type, use num_constraints to access the number of constraints and all_constraints to access a list of their references:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> num_constraints(model, VariableRef, MOI.Integer)\n2\n\njulia> cons = all_constraints(model, VariableRef, MOI.Integer)\n2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.VariableIndex, MathOptInterface.Integer}, ScalarShape}}:\n x[1] integer\n x[2] integer","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"You can also count the total number of constraints in the model, but you must explicitly choose whether to count VariableRef constraints such as bound and integrality constraints:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> num_constraints(model; count_variable_in_set_constraints = true)\n5\n\njulia> num_constraints(model; count_variable_in_set_constraints = false)\n1","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"The same also applies for all_constraints:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> all_constraints(model; include_variable_in_set_constraints = true)\n5-element Vector{ConstraintRef}:\n x[1] + x[2] ≤ 1\n x[1] ≥ 1\n x[2] ≥ 2\n x[1] integer\n x[2] integer\n\njulia> all_constraints(model; include_variable_in_set_constraints = false)\n1-element Vector{ConstraintRef}:\n x[1] + x[2] ≤ 1","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"If you need finer-grained control on which 
constraints to include, use a variant of:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> sum(\n num_constraints(model, F, S) for\n (F, S) in list_of_constraint_types(model) if F != VariableRef\n )\n1","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Use constraint_object to get an instance of an AbstractConstraint object that stores the constraint data:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> con = constraint_object(cons[1])\nScalarConstraint{VariableRef, MathOptInterface.Integer}(x[1], MathOptInterface.Integer())\n\njulia> con.func\nx[1]\n\njulia> con.set\nMathOptInterface.Integer()","category":"page"},{"location":"manual/constraints/#MathOptInterface-constraints","page":"Constraints","title":"MathOptInterface constraints","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Because JuMP is based on MathOptInterface, you can add any constraints supported by MathOptInterface using the function-in-set syntax. For a list of supported functions and sets, read Standard form problem.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"note: Note\nWe use MOI as an alias for the MathOptInterface module. This alias is defined by using JuMP. You may also define it in your code as follows:import MathOptInterface as MOI","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"For example, the following two constraints are equivalent:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[1:3]);\n\njulia> @constraint(model, 2 * x[1] <= 1)\n2 x[1] ≤ 1\n\njulia> @constraint(model, 2 * x[1] in MOI.LessThan(1.0))\n2 x[1] ≤ 1","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"You can also use any set defined by MathOptInterface:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> @constraint(model, x - [1; 2; 3] in MOI.Nonnegatives(3))\n[x[1] - 1, x[2] - 2, x[3] - 3] ∈ MathOptInterface.Nonnegatives(3)\n\njulia> @constraint(model, x in MOI.ExponentialCone())\n[x[1], x[2], x[3]] ∈ MathOptInterface.ExponentialCone()","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"info: Info\nSimilar to how JuMP defines the <= and >= syntax as a convenience way to specify MOI.LessThan and MOI.GreaterThan constraints, the remaining sections in this page describe functions and syntax that have been added for the convenience of common modeling situations.","category":"page"},{"location":"manual/constraints/#Set-inequality-syntax","page":"Constraints","title":"Set inequality syntax","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"For modeling convenience, the syntax @constraint(model, x >= y, Set()) is short-hand for @constraint(model, x - y in Set()).","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Therefore, the following calls are equivalent:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> 
@variable(model, x[1:2]);\n\njulia> y = [0.5, 0.75];\n\njulia> @constraint(model, x >= y, MOI.Nonnegatives(2))\n[x[1] - 0.5, x[2] - 0.75] ∈ MathOptInterface.Nonnegatives(2)\n\njulia> @constraint(model, x - y in MOI.Nonnegatives(2))\n[x[1] - 0.5, x[2] - 0.75] ∈ MathOptInterface.Nonnegatives(2)","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Non-zero constants are not supported in this syntax:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> @constraint(model, x >= 1, MOI.Nonnegatives(2))\nERROR: Operation `sub_mul` between `Vector{VariableRef}` and `Int64` is not allowed. This most often happens when you write a constraint like `x >= y` where `x` is an array and `y` is a constant. Use the broadcast syntax `x .- y >= 0` instead.\nStacktrace:\n[...]","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Use instead:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> @constraint(model, x .- 1 >= 0, MOI.Nonnegatives(2))\n[x[1] - 1, x[2] - 1] ∈ MathOptInterface.Nonnegatives(2)","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"warning: Warning\nThe syntax @constraint(model, y <= x, Set()) is supported, but it is not recommended because the value of the primal and dual solutions associated with the constraint may be the negative of what you expect.","category":"page"},{"location":"manual/constraints/#Second-order-cone-constraints","page":"Constraints","title":"Second-order cone constraints","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"A SecondOrderCone constrains the variables t and x to the set:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"x_2 le t","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"and t ge 0. It can be added as follows:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, t)\nt\n\njulia> @variable(model, x[1:2])\n2-element Vector{VariableRef}:\n x[1]\n x[2]\n\njulia> @constraint(model, [t; x] in SecondOrderCone())\n[t, x[1], x[2]] ∈ MathOptInterface.SecondOrderCone(3)","category":"page"},{"location":"manual/constraints/#Rotated-second-order-cone-constraints","page":"Constraints","title":"Rotated second-order cone constraints","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"A RotatedSecondOrderCone constrains the variables t, u, and x to the set:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"x_2^2 le 2 t cdot u","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"and t u ge 0. 
It can be added as follows:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, t)\nt\n\njulia> @variable(model, u)\nu\n\njulia> @variable(model, x[1:2])\n2-element Vector{VariableRef}:\n x[1]\n x[2]\n\njulia> @constraint(model, [t; u; x] in RotatedSecondOrderCone())\n[t, u, x[1], x[2]] ∈ MathOptInterface.RotatedSecondOrderCone(4)","category":"page"},{"location":"manual/constraints/#Special-Ordered-Sets-of-Type-1","page":"Constraints","title":"Special Ordered Sets of Type 1","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"In a Special Ordered Set of Type 1 (often denoted SOS-I or SOS1), at most one element can take a non-zero value.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Construct SOS-I constraints using the SOS1 set:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[1:3])\n3-element Vector{VariableRef}:\n x[1]\n x[2]\n x[3]\n\njulia> @constraint(model, x in SOS1())\n[x[1], x[2], x[3]] in MathOptInterface.SOS1{Float64}([1.0, 2.0, 3.0])","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Although not required for feasibility, solvers can benefit from an ordering of the variables (for example, the variables represent different factories to build, at most one factory can be built, and the factories can be ordered according to cost). To induce an ordering, a vector of weights can be provided, and the variables are ordered according to their corresponding weight.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"For example, in the constraint:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> @constraint(model, x in SOS1([3.1, 1.2, 2.3]))\n[x[1], x[2], x[3]] in MathOptInterface.SOS1{Float64}([3.1, 1.2, 2.3])","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"the variables x have precedence x[2], x[3], x[1].","category":"page"},{"location":"manual/constraints/#Special-Ordered-Sets-of-Type-2","page":"Constraints","title":"Special Ordered Sets of Type 2","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"In a Special Ordered Set of Type 2 (SOS-II), at most two elements can be non-zero, and if there are two non-zeros, they must be consecutive according to the ordering induced by a weight vector.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Construct SOS-II constraints using the SOS2 set:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> @constraint(model, x in SOS2([3.0, 1.0, 2.0]))\n[x[1], x[2], x[3]] in MathOptInterface.SOS2{Float64}([3.0, 1.0, 2.0])","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"The possible non-zero pairs are (x[1], x[3]) and (x[2], x[3]):","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"If the weight vector is omitted, JuMP induces an ordering from 
1:length(x):","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> @constraint(model, x in SOS2())\n[x[1], x[2], x[3]] in MathOptInterface.SOS2{Float64}([1.0, 2.0, 3.0])","category":"page"},{"location":"manual/constraints/#Indicator-constraints","page":"Constraints","title":"Indicator constraints","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Indicator constraints consist of a binary variable and a linear constraint. The constraint holds when the binary variable takes the value 1. The constraint may or may not hold when the binary variable takes the value 0.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"To enforce the constraint x + y <= 1 when the binary variable a is 1, use:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x)\nx\n\njulia> @variable(model, y)\ny\n\njulia> @variable(model, a, Bin)\na\n\njulia> @constraint(model, a --> {x + y <= 1})\na --> {x + y ≤ 1}","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"If the constraint must hold when a is zero, add ! or ¬ before the binary variable;","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> @constraint(model, !a --> {x + y <= 1})\n!a --> {x + y ≤ 1}","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"warning: Warning\nYou cannot use an expression for the left-hand side of an indicator constraint.","category":"page"},{"location":"manual/constraints/#Semidefinite-constraints","page":"Constraints","title":"Semidefinite constraints","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"To constrain a matrix to be positive semidefinite (PSD), use PSDCone:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, X[1:2, 1:2])\n2×2 Matrix{VariableRef}:\n X[1,1] X[1,2]\n X[2,1] X[2,2]\n\njulia> @constraint(model, X >= 0, PSDCone())\n[X[1,1] X[1,2]\n X[2,1] X[2,2]] ∈ PSDCone()","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"tip: Tip\nWhere possible, prefer constructing a matrix of Semidefinite variables using the @variable macro, rather than adding a constraint like @constraint(model, X >= 0, PSDCone()). 
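For example, a minimal sketch in a separate toy model (the names psd_model and S are illustrative):\n\njulia> psd_model = Model();\n\njulia> @variable(psd_model, S[1:2, 1:2], PSD);\n\n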
In some solvers, adding the constraint via @constraint is less efficient, and can result in additional intermediate variables and constraints being added to the model.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"The inequality X >= Y between two square matrices X and Y is understood as constraining X - Y to be positive semidefinite.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> Y = [1 2; 2 1]\n2×2 Matrix{Int64}:\n 1 2\n 2 1\n\njulia> @constraint(model, X >= Y, PSDCone())\n[X[1,1] - 1 X[1,2] - 2\n X[2,1] - 2 X[2,2] - 1] ∈ PSDCone()","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"warning: Warning\nThe syntax @constraint(model, Y <= X, PSDCone()) is supported, but it is not recommended because the value of the primal and dual solutions associated with the constraint may be the negative of what you expect.","category":"page"},{"location":"manual/constraints/#Symmetry","page":"Constraints","title":"Symmetry","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Solvers supporting PSD constraints usually expect to be given a matrix that is symbolically symmetric, that is, for which the expressions in corresponding off-diagonal entries are the same. In our example, the expressions of entries (1, 2) and (2, 1) are respectively X[1,2] - 2 and X[2,1] - 2, which are different.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"To bridge the gap between the constraint modeled and what the solver expects, solvers may add an equality constraint X[1,2] - 2 == X[2,1] - 2 to force symmetry. 
Use LinearAlgebra.Symmetric to explicitly tell the solver that the matrix is symmetric:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> import LinearAlgebra\n\njulia> Z = [X[1, 1] X[1, 2]; X[1, 2] X[2, 2]]\n2×2 Matrix{VariableRef}:\n X[1,1] X[1,2]\n X[1,2] X[2,2]\n\njulia> @constraint(model, LinearAlgebra.Symmetric(Z) >= 0, PSDCone())\n[X[1,1] X[1,2]\n ⋯ X[2,2]] ∈ PSDCone()","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Note that the lower triangular entries are ignored even if they are different so use it with caution:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> @constraint(model, LinearAlgebra.Symmetric(X) >= 0, PSDCone())\n[X[1,1] X[1,2]\n ⋯ X[2,2]] ∈ PSDCone()","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"(Note that no error is thrown, even though X is not symmetric.)","category":"page"},{"location":"manual/constraints/#Complementarity-constraints","page":"Constraints","title":"Complementarity constraints","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"A mixed complementarity constraint F(x) ⟂ x consists of finding x in the interval [lb, ub], such that the following holds:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"F(x) == 0 if lb < x < ub\nF(x) >= 0 if lb == x\nF(x) <= 0 if x == ub","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"JuMP supports mixed complementarity constraints via complements(F(x), x) or F(x) ⟂ x in the @constraint macro. 
The interval set [lb, ub] is obtained from the variable bounds on x.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"For example, to define the problem 2x - 1 ⟂ x with x ∈ [0, ∞), do:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x >= 0)\nx\n\njulia> @constraint(model, 2x - 1 ⟂ x)\n[2 x - 1, x] ∈ MathOptInterface.Complements(2)","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"This problem has a unique solution at x = 0.5.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"The perp operator ⟂ can be entered in most editors (and the Julia REPL) by typing \\perp.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"An alternative approach that does not require the ⟂ symbol uses the complements function as follows:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> @constraint(model, complements(2x - 1, x))\n[2 x - 1, x] ∈ MathOptInterface.Complements(2)","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"In both cases, the mapping F(x) is supplied as the first argument, and the matching variable x is supplied as the second.","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Vector-valued complementarity constraints are also supported:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> @variable(model, -2 <= y[1:2] <= 2)\n2-element Vector{VariableRef}:\n y[1]\n y[2]\n\njulia> M = [1 2; 3 4]\n2×2 Matrix{Int64}:\n 1 2\n 3 4\n\njulia> q = [5, 6]\n2-element Vector{Int64}:\n 5\n 6\n\njulia> @constraint(model, M * y + q ⟂ y)\n[y[1] + 2 y[2] + 5, 3 y[1] + 4 y[2] + 6, y[1], y[2]] ∈ MathOptInterface.Complements(4)","category":"page"},{"location":"manual/constraints/#Boolean-constraints","page":"Constraints","title":"Boolean constraints","text":"","category":"section"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Add a Boolean constraint (a MOI.EqualTo{Bool} set) using the := operator with a Bool right-hand side term:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = GenericModel{Bool}();\n\njulia> @variable(model, x[1:2]);\n\njulia> @constraint(model, x[1] || x[2] := true)\nx[1] || x[2] = true\n\njulia> @constraint(model, x[1] && x[2] := false)\nx[1] && x[2] = false\n\njulia> model\nA JuMP Model\n├ value_type: Bool\n├ solver: none\n├ objective_sense: FEASIBILITY_SENSE\n├ num_variables: 2\n├ num_constraints: 2\n│ └ GenericNonlinearExpr{GenericVariableRef{Bool}} in MOI.EqualTo{Bool}: 2\n└ Names registered in the model\n └ :x","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"Boolean constraints should not be added using the == operator because JuMP will rewrite the constraint as lhs - rhs = 0, and because constraints like a == b == c require parentheses to disambiguate between (a == b) == c and a == (b == c). 
In contrast, a == b := c is equivalent to (a == b) := c:","category":"page"},{"location":"manual/constraints/","page":"Constraints","title":"Constraints","text":"julia> model = Model();\n\njulia> @variable(model, x[1:2]);\n\njulia> rhs = false\nfalse\n\njulia> @constraint(model, (x[1] == x[2]) == rhs)\n(x[1] == x[2]) - 0.0 = 0\n\njulia> @constraint(model, x[1] == x[2] := rhs)\nx[1] == x[2] = false","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"EditURL = \"power_systems.jl\"","category":"page"},{"location":"tutorials/applications/power_systems/#Power-Systems","page":"Power Systems","title":"Power Systems","text":"","category":"section"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"This tutorial was originally contributed by Yury Dvorkin and Miles Lubin.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"This tutorial demonstrates how to formulate basic power systems engineering models in JuMP.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"We will consider basic \"economic dispatch\" and \"unit commitment\" models without taking into account transmission constraints.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"For this tutorial, we use the following packages:","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"using JuMP\nimport DataFrames\nimport HiGHS\nimport Plots\nimport StatsPlots","category":"page"},{"location":"tutorials/applications/power_systems/#Economic-dispatch","page":"Power Systems","title":"Economic dispatch","text":"","category":"section"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"Economic dispatch (ED) is an optimization problem that minimizes the cost of supplying energy demand subject to operational constraints on power system assets. 
In its simplest modification, ED is an LP problem solved for an aggregated load and wind forecast and for a single infinitesimal moment.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"Mathematically, the ED problem can be written as follows:","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"min sum_i in I c^g_i cdot g_i + c^w cdot w","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"where c_i and g_i are the incremental cost ($/MWh) and power output (MW) of the i^th generator, respectively, and c^w and w are the incremental cost ($/MWh) and wind power injection (MW), respectively.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"Subject to the constraints:","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"Minimum (g^min) and maximum (g^max) limits on power outputs of generators: g^min_i leq g_i leq g^max_i\nConstraint on the wind power injection: 0 leq w leq w^f where w and w^f are the wind power injection and wind power forecast, respectively.\nPower balance constraint: sum_i in I g_i + w = d^f where d^f is the demand forecast.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"Further reading on ED models can be found in A. J. Wood, B. F. Wollenberg, and G. B. Sheblé, \"Power Generation, Operation and Control,\" Wiley, 2013.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"Define some input data about the test system.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"We define some thermal generators:","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"function ThermalGenerator(\n min::Float64,\n max::Float64,\n fixed_cost::Float64,\n variable_cost::Float64,\n)\n return (\n min = min,\n max = max,\n fixed_cost = fixed_cost,\n variable_cost = variable_cost,\n )\nend\n\ngenerators = [\n ThermalGenerator(0.0, 1000.0, 1000.0, 50.0),\n ThermalGenerator(300.0, 1000.0, 0.0, 100.0),\n]","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"A wind generator","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"WindGenerator(variable_cost::Float64) = (variable_cost = variable_cost,)\n\nwind_generator = WindGenerator(50.0)","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"And a scenario","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"function Scenario(demand::Float64, wind::Float64)\n return (demand = demand, wind = wind)\nend\n\nscenario = Scenario(1500.0, 200.0)","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"Create a function solve_economic_dispatch, which solves the economic dispatch problem for a given set of input 
parameters.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"function solve_economic_dispatch(generators::Vector, wind, scenario)\n # Define the economic dispatch (ED) model\n model = Model(HiGHS.Optimizer)\n set_silent(model)\n # Define decision variables\n # power output of generators\n N = length(generators)\n @variable(model, generators[i].min <= g[i = 1:N] <= generators[i].max)\n # wind power injection\n @variable(model, 0 <= w <= scenario.wind)\n # Define the objective function\n @objective(\n model,\n Min,\n sum(generators[i].variable_cost * g[i] for i in 1:N) +\n wind.variable_cost * w,\n )\n # Define the power balance constraint\n @constraint(model, sum(g[i] for i in 1:N) + w == scenario.demand)\n # Solve statement\n optimize!(model)\n @assert is_solved_and_feasible(model)\n # return the optimal value of the objective function and its minimizers\n return (\n g = value.(g),\n w = value(w),\n wind_spill = scenario.wind - value(w),\n total_cost = objective_value(model),\n )\nend","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"Solve the economic dispatch problem","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"solution = solve_economic_dispatch(generators, wind_generator, scenario);\n\nprintln(\"Dispatch of Generators: \", solution.g, \" MW\")\nprintln(\"Dispatch of Wind: \", solution.w, \" MW\")\nprintln(\"Wind spillage: \", solution.wind_spill, \" MW\")\nprintln(\"Total cost: \\$\", solution.total_cost)","category":"page"},{"location":"tutorials/applications/power_systems/#Economic-dispatch-with-adjustable-incremental-costs","page":"Power Systems","title":"Economic dispatch with adjustable incremental costs","text":"","category":"section"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"In the following exercise we adjust the incremental cost of generator G1 and observe its impact on the total cost.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"function scale_generator_cost(g, scale)\n return ThermalGenerator(g.min, g.max, g.fixed_cost, scale * g.variable_cost)\nend\n\nstart = time()\nc_g_scale_df = DataFrames.DataFrame(;\n # Scale factor\n scale = Float64[],\n # Dispatch of Generator 1 [MW]\n dispatch_G1 = Float64[],\n # Dispatch of Generator 2 [MW]\n dispatch_G2 = Float64[],\n # Dispatch of Wind [MW]\n dispatch_wind = Float64[],\n # Spillage of Wind [MW]\n spillage_wind = Float64[],\n # Total cost [$]\n total_cost = Float64[],\n)\nfor c_g1_scale in 0.5:0.1:3.0\n # Update the incremental cost of the first generator at every iteration.\n new_generators = scale_generator_cost.(generators, [c_g1_scale, 1.0])\n # Solve the economic-dispatch problem with the updated incremental cost\n sol = solve_economic_dispatch(new_generators, wind_generator, scenario)\n push!(\n c_g_scale_df,\n (c_g1_scale, sol.g[1], sol.g[2], sol.w, sol.wind_spill, sol.total_cost),\n )\nend\nprint(string(\"elapsed time: \", time() - start, \" seconds\"))","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"c_g_scale_df","category":"page"},{"location":"tutorials/applications/power_systems/#Modifying-the-JuMP-model-in-place","page":"Power Systems","title":"Modifying the JuMP 
model in-place","text":"","category":"section"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"Note that in the previous exercise we entirely rebuilt the optimization model at every iteration of the internal loop, which incurs an additional computational burden. This burden can be alleviated if instead of re-building the entire model, we modify the constraints or objective function, as it shown in the example below.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"Compare the computing time in case of the above and below models.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"function solve_economic_dispatch_inplace(\n generators::Vector,\n wind,\n scenario,\n scale::AbstractVector{Float64},\n)\n obj_out = Float64[]\n w_out = Float64[]\n g1_out = Float64[]\n g2_out = Float64[]\n # This function only works for two generators\n @assert length(generators) == 2\n model = Model(HiGHS.Optimizer)\n set_silent(model)\n N = length(generators)\n @variable(model, generators[i].min <= g[i = 1:N] <= generators[i].max)\n @variable(model, 0 <= w <= scenario.wind)\n @objective(\n model,\n Min,\n sum(generators[i].variable_cost * g[i] for i in 1:N) +\n wind.variable_cost * w,\n )\n @constraint(model, sum(g[i] for i in 1:N) + w == scenario.demand)\n for c_g1_scale in scale\n @objective(\n model,\n Min,\n c_g1_scale * generators[1].variable_cost * g[1] +\n generators[2].variable_cost * g[2] +\n wind.variable_cost * w,\n )\n optimize!(model)\n @assert is_solved_and_feasible(model)\n push!(obj_out, objective_value(model))\n push!(w_out, value(w))\n push!(g1_out, value(g[1]))\n push!(g2_out, value(g[2]))\n end\n df = DataFrames.DataFrame(;\n scale = scale,\n dispatch_G1 = g1_out,\n dispatch_G2 = g2_out,\n dispatch_wind = w_out,\n spillage_wind = scenario.wind .- w_out,\n total_cost = obj_out,\n )\n return df\nend\n\nstart = time()\ninplace_df = solve_economic_dispatch_inplace(\n generators,\n wind_generator,\n scenario,\n 0.5:0.1:3.0,\n)\nprint(string(\"elapsed time: \", time() - start, \" seconds\"))","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"For small models, adjusting specific constraints or the objective function is sometimes faster and sometimes slower than re-building the entire model. However, as the problem size increases, updating the model in-place is usually faster.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"inplace_df","category":"page"},{"location":"tutorials/applications/power_systems/#Inefficient-usage-of-wind-generators","page":"Power Systems","title":"Inefficient usage of wind generators","text":"","category":"section"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"The economic dispatch problem does not perform commitment decisions and, thus, assumes that all generators must be dispatched at least at their minimum power output limit. This approach is not cost efficient and may lead to absurd decisions. 
For example, if d = sum_i in I g^min_i, the wind power injection must be zero, that is, all available wind generation is spilled, to meet the minimum power output constraints on generators.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"In the following example, we adjust the total demand and observe how it affects wind spillage.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"demand_scale_df = DataFrames.DataFrame(;\n demand = Float64[],\n dispatch_G1 = Float64[],\n dispatch_G2 = Float64[],\n dispatch_wind = Float64[],\n spillage_wind = Float64[],\n total_cost = Float64[],\n)\n\nfunction scale_demand(scenario, scale)\n return Scenario(scale * scenario.demand, scenario.wind)\nend\n\nfor demand_scale in 0.2:0.1:1.4\n new_scenario = scale_demand(scenario, demand_scale)\n sol = solve_economic_dispatch(generators, wind_generator, new_scenario)\n push!(\n demand_scale_df,\n (\n new_scenario.demand,\n sol.g[1],\n sol.g[2],\n sol.w,\n sol.wind_spill,\n sol.total_cost,\n ),\n )\nend\n\ndemand_scale_df","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"dispatch_plot = StatsPlots.@df(\n demand_scale_df,\n Plots.plot(\n :demand,\n [:dispatch_G1, :dispatch_G2],\n labels = [\"G1\" \"G2\"],\n title = \"Thermal Dispatch\",\n legend = :bottomright,\n linewidth = 3,\n xlabel = \"Demand\",\n ylabel = \"Dispatch [MW]\",\n ),\n)\n\nwind_plot = StatsPlots.@df(\n demand_scale_df,\n Plots.plot(\n :demand,\n [:dispatch_wind, :spillage_wind],\n labels = [\"Dispatch\" \"Spillage\"],\n title = \"Wind\",\n legend = :bottomright,\n linewidth = 3,\n xlabel = \"Demand [MW]\",\n ylabel = \"Energy [MW]\",\n ),\n)\n\nPlots.plot(dispatch_plot, wind_plot)","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"This particular drawback can be overcome by introducing binary decisions on the \"on/off\" status of generators. This model is called unit commitment and is considered later in these notes.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"For further reading on the interplay between wind generation and the minimum power output constraints of generators, we refer interested readers to R. Baldick, \"Wind and energy markets: a case study of Texas,\" IEEE Systems Journal, vol. 6, pp. 27-34, 2012.","category":"page"},{"location":"tutorials/applications/power_systems/#Unit-commitment","page":"Power Systems","title":"Unit commitment","text":"","category":"section"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"The Unit Commitment (UC) model can be obtained from the ED model by introducing a binary variable associated with each generator. 
This binary variable can attain two values: if it is \"1,\" the generator is synchronized and, thus, can be dispatched, otherwise, that is, if the binary variable is \"0,\" that generator is not synchronized and its power output is set to 0.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"To obtain the mathematical formulation of the UC model, we will modify the constraints of the ED model as follows:","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"g^min_i cdot u_ti leq g_i leq g^max_i cdot u_ti","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"where u_i in 01 In this constraint, if u_i = 0, then g_i = 0. On the other hand, if u_i = 1, then g^min_i leq g_i leq g^max_i.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"For further reading on the UC problem we refer interested readers to G. Morales-Espana, J. M. Latorre, and A. Ramos, \"Tight and Compact MILP Formulation for the Thermal Unit Commitment Problem,\" IEEE Transactions on Power Systems, vol. 28, pp. 4897-4908, 2013.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"In the following example we convert the ED model explained above to the UC model.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"function solve_unit_commitment(generators::Vector, wind, scenario)\n model = Model(HiGHS.Optimizer)\n set_silent(model)\n N = length(generators)\n @variable(model, 0 <= g[i = 1:N] <= generators[i].max)\n @variable(model, 0 <= w <= scenario.wind)\n @constraint(model, sum(g[i] for i in 1:N) + w == scenario.demand)\n # !!! New: add binary on-off variables for each generator\n @variable(model, u[i = 1:N], Bin)\n @constraint(model, [i = 1:N], g[i] <= generators[i].max * u[i])\n @constraint(model, [i = 1:N], g[i] >= generators[i].min * u[i])\n @objective(\n model,\n Min,\n sum(generators[i].variable_cost * g[i] for i in 1:N) +\n wind.variable_cost * w +\n # !!! 
new\n sum(generators[i].fixed_cost * u[i] for i in 1:N)\n )\n optimize!(model)\n status = termination_status(model)\n if status != OPTIMAL\n return (status = status,)\n end\n @assert primal_status(model) == FEASIBLE_POINT\n return (\n status = status,\n g = value.(g),\n w = value(w),\n wind_spill = scenario.wind - value(w),\n u = value.(u),\n total_cost = objective_value(model),\n )\nend","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"Solve the unit commitment problem","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"solution = solve_unit_commitment(generators, wind_generator, scenario)\n\nprintln(\"Dispatch of Generators: \", solution.g, \" MW\")\nprintln(\"Commitments of Generators: \", solution.u)\nprintln(\"Dispatch of Wind: \", solution.w, \" MW\")\nprintln(\"Wind spillage: \", solution.wind_spill, \" MW\")\nprintln(\"Total cost: \\$\", solution.total_cost)","category":"page"},{"location":"tutorials/applications/power_systems/#Unit-commitment-as-a-function-of-demand","page":"Power Systems","title":"Unit commitment as a function of demand","text":"","category":"section"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"After implementing the unit commitment model, we can now assess the interplay between the minimum power output constraints on generators and wind generation.","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"uc_df = DataFrames.DataFrame(;\n demand = Float64[],\n commitment_G1 = Float64[],\n commitment_G2 = Float64[],\n dispatch_G1 = Float64[],\n dispatch_G2 = Float64[],\n dispatch_wind = Float64[],\n spillage_wind = Float64[],\n total_cost = Float64[],\n)\n\nfor demand_scale in 0.2:0.1:1.4\n new_scenario = scale_demand(scenario, demand_scale)\n sol = solve_unit_commitment(generators, wind_generator, new_scenario)\n if sol.status == OPTIMAL\n push!(\n uc_df,\n (\n new_scenario.demand,\n sol.u[1],\n sol.u[2],\n sol.g[1],\n sol.g[2],\n sol.w,\n sol.wind_spill,\n sol.total_cost,\n ),\n )\n end\n println(\"Status: $(sol.status) for demand_scale = $(demand_scale)\")\nend","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"uc_df","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"commitment_plot = StatsPlots.@df(\n uc_df,\n Plots.plot(\n :demand,\n [:commitment_G1, :commitment_G2],\n labels = [\"G1\" \"G2\"],\n title = \"Commitment\",\n legend = :bottomright,\n linewidth = 3,\n xlabel = \"Demand [MW]\",\n ylabel = \"Commitment decision {0, 1}\",\n ),\n)\n\ndispatch_plot = StatsPlots.@df(\n uc_df,\n Plots.plot(\n :demand,\n [:dispatch_G1, :dispatch_G2, :dispatch_wind],\n labels = [\"G1\" \"G2\" \"Wind\"],\n title = \"Dispatch [MW]\",\n legend = :bottomright,\n linewidth = 3,\n xlabel = \"Demand\",\n ylabel = \"Dispatch [MW]\",\n ),\n)\n\nPlots.plot(commitment_plot, dispatch_plot)","category":"page"},{"location":"tutorials/applications/power_systems/#Nonlinear-economic-dispatch","page":"Power Systems","title":"Nonlinear economic dispatch","text":"","category":"section"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"As a final example, we modify our economic dispatch problem in two 
ways:","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"The thermal cost function is user-defined\nThe output of the wind is only the square-root of the dispatch","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"import Ipopt\n\n\"\"\"\n thermal_cost_function(g)\n\nA user-defined thermal cost function in pure-Julia! You can include\nnonlinearities, and even things like control flow.\n\n!!! warning\n It's still up to you to make sure that the function has a meaningful\n derivative.\n\"\"\"\nfunction thermal_cost_function(g)\n if g <= 500\n return g\n else\n return g + 1e-2 * (g - 500)^2\n end\nend\n\nfunction solve_nonlinear_economic_dispatch(\n generators::Vector,\n wind,\n scenario;\n silent::Bool = false,\n)\n model = Model(Ipopt.Optimizer)\n if silent\n set_silent(model)\n end\n @operator(model, op_tcf, 1, thermal_cost_function)\n N = length(generators)\n @variable(model, generators[i].min <= g[i = 1:N] <= generators[i].max)\n @variable(model, 0 <= w <= scenario.wind)\n @objective(\n model,\n Min,\n sum(generators[i].variable_cost * op_tcf(g[i]) for i in 1:N) +\n wind.variable_cost * w,\n )\n @constraint(model, sum(g[i] for i in 1:N) + sqrt(w) == scenario.demand)\n optimize!(model)\n @assert is_solved_and_feasible(model)\n return (\n g = value.(g),\n w = value(w),\n wind_spill = scenario.wind - value(w),\n total_cost = objective_value(model),\n )\nend\n\nsolution =\n solve_nonlinear_economic_dispatch(generators, wind_generator, scenario)","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"Now let's see how the wind is dispatched as a function of the cost:","category":"page"},{"location":"tutorials/applications/power_systems/","page":"Power Systems","title":"Power Systems","text":"wind_cost = 0.0:1:100\nwind_dispatch = Float64[]\nfor c in wind_cost\n sol = solve_nonlinear_economic_dispatch(\n generators,\n WindGenerator(c),\n scenario;\n silent = true,\n )\n push!(wind_dispatch, sol.w)\nend\n\nPlots.plot(\n wind_cost,\n wind_dispatch;\n xlabel = \"Cost\",\n ylabel = \"Dispatch [MW]\",\n label = false,\n)","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"EditURL = \"logistic_regression.jl\"","category":"page"},{"location":"tutorials/conic/logistic_regression/#Example:-logistic-regression","page":"Example: logistic regression","title":"Example: logistic regression","text":"","category":"section"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"This tutorial was originally contributed by François Pacaud.","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"This tutorial shows how to solve a logistic regression problem with JuMP. Logistic regression is a well known method in machine learning, useful when we want to classify binary variables with the help of a given set of features. 
To this goal, we find the optimal combination of features maximizing the (log)-likelihood onto a training set.","category":"page"},{"location":"tutorials/conic/logistic_regression/#Required-packages","page":"Example: logistic regression","title":"Required packages","text":"","category":"section"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"This tutorial uses the following packages:","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"using JuMP\nimport MathOptInterface as MOI\nimport Random\nimport SCS\n\nRandom.seed!(2713);\nnothing #hide","category":"page"},{"location":"tutorials/conic/logistic_regression/#Formulating-the-logistic-regression-problem","page":"Example: logistic regression","title":"Formulating the logistic regression problem","text":"","category":"section"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"Suppose we have a set of training data-point i = 1 cdots n, where for each i we have a vector of features x_i in mathbbR^p and a categorical observation y_i in -1 1.","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"The log-likelihood is given by","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"l(theta) = sum_i=1^n log(dfrac11 + exp(-y_i theta^top x_i))","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"and the optimal theta minimizes the logistic loss function:","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"min_theta sum_i=1^n log(1 + exp(-y_i theta^top x_i))","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"Most of the time, instead of solving directly the previous optimization problem, we prefer to add a regularization term:","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"min_theta sum_i=1^n log(1 + exp(-y_i theta^top x_i)) + lambda theta ","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"with lambda in mathbbR_+ a penalty and a norm function. 
By adding such a regularization term, we avoid overfitting on the training set and usually achieve a greater score in cross-validation.","category":"page"},{"location":"tutorials/conic/logistic_regression/#Reformulation-as-a-conic-optimization-problem","page":"Example: logistic regression","title":"Reformulation as a conic optimization problem","text":"","category":"section"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"By introducing auxiliary variables t_1 cdots t_n and r, the optimization problem is equivalent to","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"beginaligned\nmin_t r theta sum_i=1^n t_i + lambda r \ntextsubject to quad t_i geq log(1 + exp(- y_i theta^top x_i)) \n quad r geq theta\nendaligned","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"Now, the trick is to reformulate the constraints t_i geq log(1 + exp(- y_i theta^top x_i)) with the help of the exponential cone","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"K_exp = (x y z) in mathbbR^3 y exp(x y) leq z ","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"Indeed, by passing to the exponential, we see that for all i=1 cdots n, the constraint t_i geq log(1 + exp(- y_i theta^top x_i)) is equivalent to","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"exp(-t_i) + exp(u_i - t_i) leq 1","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"with u_i = -y_i theta^top x_i. Then, by adding two auxiliary variables z_i1 and z_i2 such that z_i1 geq exp(u_i-t_i) and z_i2 geq exp(-t_i), we get the equivalent formulation","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"left\nbeginaligned\n(u_i -t_i 1 z_i1) in K_exp \n(-t_i 1 z_i2) in K_exp \nz_i1 + z_i2 leq 1\nendaligned\nright","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"In this setting, the conic version of the logistic regression problems writes out","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"beginaligned\nmin_t z r theta sum_i=1^n t_i + lambda r \ntextsubject to quad (u_i -t_i 1 z_i1) in K_exp \n quad (-t_i 1 z_i2) in K_exp \n quad z_i1 + z_i2 leq 1 \n quad u_i = -y_i x_i^top theta \n quad r geq theta\nendaligned","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"and thus encompasses 3n + p + 1 variables and 3n + 1 constraints (u_i = -y_i theta^top x_i is only a virtual constraint used to clarify the notation). 
Thus, if n gg 1, we get a large number of variables and constraints.","category":"page"},{"location":"tutorials/conic/logistic_regression/#Fitting-logistic-regression-with-a-conic-solver","page":"Example: logistic regression","title":"Fitting logistic regression with a conic solver","text":"","category":"section"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"We start by implementing a function to generate a fake dataset, and where we could tune the correlation between the feature variables. The function is a direct transcription of the one used in this blog post.","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"function generate_dataset(n_samples = 100, n_features = 10; shift = 0.0)\n X = randn(n_samples, n_features)\n w = randn(n_features)\n y = sign.(X * w)\n X .+= 0.8 * randn(n_samples, n_features) # add noise\n X .+= shift # shift the points in the feature space\n X = hcat(X, ones(n_samples, 1))\n return X, y\nend","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"We write a softplus function to formulate each constraint t geq log(1 + exp(u)) with two exponential cones.","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"function softplus(model, t, u)\n z = @variable(model, [1:2], lower_bound = 0.0)\n @constraint(model, sum(z) <= 1.0)\n @constraint(model, [u - t, 1, z[1]] in MOI.ExponentialCone())\n @constraint(model, [-t, 1, z[2]] in MOI.ExponentialCone())\nend","category":"page"},{"location":"tutorials/conic/logistic_regression/#\\ell_2-regularized-logistic-regression","page":"Example: logistic regression","title":"ell_2 regularized logistic regression","text":"","category":"section"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"Then, with the help of the softplus function, we could write our optimization model. 
In the ell_2 regularization case, the constraint r geq theta_2 rewrites as a second order cone constraint.","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"function build_logit_model(X, y, λ)\n n, p = size(X)\n model = Model()\n @variable(model, θ[1:p])\n @variable(model, t[1:n])\n for i in 1:n\n u = -(X[i, :]' * θ) * y[i]\n softplus(model, t[i], u)\n end\n # Add ℓ2 regularization\n @variable(model, 0.0 <= reg)\n @constraint(model, [reg; θ] in SecondOrderCone())\n # Define objective\n @objective(model, Min, sum(t) + λ * reg)\n return model\nend","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"We generate the dataset.","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"warning: Warning\nBe careful here, for large n and p SCS could fail to converge.","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"n, p = 200, 10\nX, y = generate_dataset(n, p; shift = 10.0);\n\n# We could now solve the logistic regression problem\nλ = 10.0\nmodel = build_logit_model(X, y, λ)\nset_optimizer(model, SCS.Optimizer)\nset_silent(model)\noptimize!(model)\n@assert is_solved_and_feasible(model)","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"θ♯ = value.(model[:θ])","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"It appears that the speed of convergence is not that impacted by the correlation of the dataset, nor by the penalty lambda.","category":"page"},{"location":"tutorials/conic/logistic_regression/#\\ell_1-regularized-logistic-regression","page":"Example: logistic regression","title":"ell_1 regularized logistic regression","text":"","category":"section"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"We now formulate the logistic problem with a ell_1 regularization term. The ell_1 regularization ensures sparsity in the optimal solution of the resulting optimization problem. Luckily, the ell_1 norm is implemented as a set in MathOptInterface. 
Thus, we could formulate the sparse logistic regression problem with the help of a MOI.NormOneCone set.","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"function build_sparse_logit_model(X, y, λ)\n n, p = size(X)\n model = Model()\n @variable(model, θ[1:p])\n @variable(model, t[1:n])\n for i in 1:n\n u = -(X[i, :]' * θ) * y[i]\n softplus(model, t[i], u)\n end\n # Add ℓ1 regularization\n @variable(model, 0.0 <= reg)\n @constraint(model, [reg; θ] in MOI.NormOneCone(p + 1))\n # Define objective\n @objective(model, Min, sum(t) + λ * reg)\n return model\nend\n\n# Auxiliary function to count non-null components:\ncount_nonzero(v::Vector; tol = 1e-6) = sum(abs.(v) .>= tol)\n\n# We solve the sparse logistic regression problem on the same dataset as before.\nλ = 10.0\nsparse_model = build_sparse_logit_model(X, y, λ)\nset_optimizer(sparse_model, SCS.Optimizer)\nset_silent(sparse_model)\noptimize!(sparse_model)\n@assert is_solved_and_feasible(sparse_model)","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"θ♯ = value.(sparse_model[:θ])\nprintln(\n \"Number of non-zero components: \",\n count_nonzero(θ♯),\n \" (out of \",\n p,\n \" features)\",\n)","category":"page"},{"location":"tutorials/conic/logistic_regression/#Extensions","page":"Example: logistic regression","title":"Extensions","text":"","category":"section"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"A direct extension would be to consider the sparse logistic regression with hard thresholding, which, in contrast to the soft version using an ell_1 regularization, adds an explicit cardinality constraint in its formulation:","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"beginaligned\nmin_theta sum_i=1^n log(1 + exp(-y_i theta^top x_i)) + lambda theta _2^2 \ntextsubject to quad theta _0 = k\nendaligned","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"where k is the maximum number of non-zero components in the vector theta, and _0 is the ell_0 pseudo-norm:","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":" x_0 = i x_i neq 0","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"The cardinality constraint theta_0 leq k could be reformulated with binary variables. 
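One possible sketch, not from the original tutorial, assumes a known bound M on the absolute value of each component of theta, introduces binary variables z, and reuses the softplus helper defined above:","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"# Hypothetical big-M reformulation: z[j] = 1 allows θ[j] to be non-zero,\n# and at most k entries of θ may be non-zero.\nfunction build_hard_sparse_logit_model(X, y, λ, k, M)\n n, p = size(X)\n model = Model()\n @variable(model, θ[1:p])\n @variable(model, t[1:n])\n for i in 1:n\n u = -(X[i, :]' * θ) * y[i]\n softplus(model, t[i], u)\n end\n @variable(model, z[1:p], Bin)\n @constraint(model, [j = 1:p], -M * z[j] <= θ[j])\n @constraint(model, [j = 1:p], θ[j] <= M * z[j])\n @constraint(model, sum(z) <= k)\n @objective(model, Min, sum(t) + λ * sum(θ .^ 2))\n return model\nend","category":"page"},{"location":"tutorials/conic/logistic_regression/","page":"Example: logistic regression","title":"Example: logistic regression","text":"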
Thus the hard sparse regression problem could be solved by any solver supporting mixed-integer conic problems.","category":"page"},{"location":"moi/background/motivation/","page":"Motivation","title":"Motivation","text":"EditURL = \"https://github.com/jump-dev/MathOptInterface.jl/blob/v1.34.0/docs/src/background/motivation.md\"","category":"page"},{"location":"moi/background/motivation/#Motivation","page":"Motivation","title":"Motivation","text":"","category":"section"},{"location":"moi/background/motivation/","page":"Motivation","title":"Motivation","text":"MathOptInterface (MOI) is a replacement for MathProgBase, the first-generation abstraction layer for mathematical optimization previously used by JuMP and Convex.jl.","category":"page"},{"location":"moi/background/motivation/","page":"Motivation","title":"Motivation","text":"To address a number of limitations of MathProgBase, MOI is designed to:","category":"page"},{"location":"moi/background/motivation/","page":"Motivation","title":"Motivation","text":"Be simple and extensible\nunifying linear, quadratic, and conic optimization,\nseamlessly facilitating extensions to essentially arbitrary constraints and functions (for example, indicator constraints, complementarity constraints, and piecewise-linear functions)\nBe fast\nby allowing access to a solver's in-memory representation of a problem without writing intermediate files (when possible)\nby using multiple dispatch and avoiding requiring containers of non-concrete types\nAllow a solver to return multiple results (for example, a pool of solutions)\nAllow a solver to return extra arbitrary information via attributes (for example, variable- and constraint-wise membership in an irreducible inconsistent subset for infeasibility analysis)\nProvide a greatly expanded set of status codes explaining what happened during the optimization procedure\nEnable a solver to more precisely specify which problem classes it supports\nEnable both primal and dual warm starts\nEnable adding and removing both variables and constraints by indices that are not required to be consecutive\nEnable any modification that the solver supports to an existing model\nAvoid requiring the solver wrapper to store an additional copy of the problem data","category":"page"},{"location":"tutorials/conic/introduction/#Introduction","page":"Introduction","title":"Introduction","text":"","category":"section"},{"location":"tutorials/conic/introduction/","page":"Introduction","title":"Introduction","text":"Conic programs are a class of convex nonlinear optimization problems which use cones to represent the nonlinearities. They have the form:","category":"page"},{"location":"tutorials/conic/introduction/","page":"Introduction","title":"Introduction","text":"min_{x ∈ ℝⁿ} f_0(x)\ns.t. f_j(x) ∈ S_j, j = 1, …, m","category":"page"},{"location":"tutorials/conic/introduction/","page":"Introduction","title":"Introduction","text":"Mixed-integer conic programs (MICPs) are extensions of conic programs in which some (or all) of the decision variables take discrete values.","category":"page"},{"location":"tutorials/conic/introduction/#How-to-choose-a-solver","page":"Introduction","title":"How to choose a solver","text":"","category":"section"},{"location":"tutorials/conic/introduction/","page":"Introduction","title":"Introduction","text":"JuMP supports a range of conic solvers, although solvers differ in which types of cones they support. 
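As a minimal sketch of such a model (the data is arbitrary, and SCS is used only because it already appears in this documentation; any solver supporting second-order cones would do):\n\nusing JuMP, SCS\nmodel = Model(SCS.Optimizer)\n@variable(model, x[1:2])\n@variable(model, t)\n# An arbitrary linear constraint on x.\n@constraint(model, sum(x) == 1)\n# t bounds the Euclidean norm of x from above.\n@constraint(model, [t; x] in SecondOrderCone())\n@objective(model, Min, t)\noptimize!(model)\n\n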
In the list of Supported solvers, \"SOCP\" denotes solvers supporting second-order cones and \"SDP\" denotes solvers supporting semidefinite cones. In addition, solvers such as SCS and Mosek have support for the exponential cone. Moreover, due to the bridging system in MathOptInterface, many of these solvers support a much wider range of exotic cones than they natively support. Solvers supporting discrete variables start with \"(MI)\" in the list of Supported solvers.","category":"page"},{"location":"tutorials/conic/introduction/","page":"Introduction","title":"Introduction","text":"tip: Tip\nDuality plays a large role in solving conic optimization models. Depending on the solver, it can be more efficient to solve the dual instead of the primal. If performance is an issue, see the Dualization tutorial for more details.","category":"page"},{"location":"tutorials/conic/introduction/#How-these-tutorials-are-structured","page":"Introduction","title":"How these tutorials are structured","text":"","category":"section"},{"location":"tutorials/conic/introduction/","page":"Introduction","title":"Introduction","text":"Having a high-level overview of how this part of the documentation is structured will help you know where to look for certain things.","category":"page"},{"location":"tutorials/conic/introduction/","page":"Introduction","title":"Introduction","text":"The following tutorials are worked examples that present a problem in words, then formulate it in mathematics, and then solve it in JuMP. This usually involves some sort of visualization of the solution. Start here if you are new to JuMP.\nExample: experiment design\nExample: logistic regression\nThe Modeling with cones tutorial contains a number of helpful reformulations and tricks you can use when modeling conic programs. Look here if you are stuck trying to formulate a problem as a conic program.\nThe remaining tutorials are less verbose and styled in the form of short code examples. These tutorials have less explanation, but may contain useful code snippets, particularly if they are similar to a problem you are trying to solve.","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"EditURL = \"https://github.com/jump-dev/MathOptInterface.jl/blob/v1.34.0/docs/src/tutorials/manipulating_expressions.md\"","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"CurrentModule = MathOptInterface\nDocTestSetup = quote\n import MathOptInterface as MOI\nend\nDocTestFilters = [r\"MathOptInterface|MOI\"]","category":"page"},{"location":"moi/tutorials/manipulating_expressions/#Manipulating-expressions","page":"Manipulating expressions","title":"Manipulating expressions","text":"","category":"section"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"This guide highlights a syntactically appealing way to build expressions at the MOI level, but also to look at their contents. 
It may be especially useful when writing models or bridge code.","category":"page"},{"location":"moi/tutorials/manipulating_expressions/#Creating-functions","page":"Manipulating expressions","title":"Creating functions","text":"","category":"section"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"This section details the ways to create functions with MathOptInterface.","category":"page"},{"location":"moi/tutorials/manipulating_expressions/#Creating-scalar-affine-functions","page":"Manipulating expressions","title":"Creating scalar affine functions","text":"","category":"section"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"The simplest scalar function is simply a variable:","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"julia> x = MOI.add_variable(model) # Create the variable x\nMOI.VariableIndex(1)","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"This type of function is extremely simple; to express more complex functions, other types must be used. For instance, a ScalarAffineFunction is a sum of linear terms (a factor times a variable) and a constant. Such an object can be built using the standard constructor:","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"julia> f = MOI.ScalarAffineFunction([MOI.ScalarAffineTerm(1, x)], 2) # x + 2\n(2) + (1) MOI.VariableIndex(1)","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"However, you can also use operators to build the same scalar function:","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"julia> f = x + 2\n(2) + (1) MOI.VariableIndex(1)","category":"page"},{"location":"moi/tutorials/manipulating_expressions/#Creating-scalar-quadratic-functions","page":"Manipulating expressions","title":"Creating scalar quadratic functions","text":"","category":"section"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"Scalar quadratic functions are stored in ScalarQuadraticFunction objects, in a way that is highly similar to scalar affine functions. 
You can obtain a quadratic function as a product of affine functions:","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"julia> 1 * x * x\n(0) + 1.0 MOI.VariableIndex(1)²\n\njulia> f * f # (x + 2)²\n(4) + (2) MOI.VariableIndex(1) + (2) MOI.VariableIndex(1) + 1.0 MOI.VariableIndex(1)²\n\njulia> f^2 # (x + 2)² too\n(4) + (2) MOI.VariableIndex(1) + (2) MOI.VariableIndex(1) + 1.0 MOI.VariableIndex(1)²","category":"page"},{"location":"moi/tutorials/manipulating_expressions/#Creating-vector-functions","page":"Manipulating expressions","title":"Creating vector functions","text":"","category":"section"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"A vector function is a function with several values, irrespective of the number of input variables. Similarly to scalar functions, there are three main types of vector functions: VectorOfVariables, VectorAffineFunction, and VectorQuadraticFunction.","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"The easiest way to create a vector function is to stack several scalar functions using Utilities.vectorize. It takes a vector as input, and the generated vector function (of the most appropriate type) has each dimension corresponding to a dimension of the vector.","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"julia> g = MOI.Utilities.vectorize([f, 2 * f])\n┌ ┐\n│(2) + (1) MOI.VariableIndex(1)│\n│(4) + (2) MOI.VariableIndex(1)│\n└ ┘","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"warning: Warning\nUtilities.vectorize only takes a vector of similar scalar functions: you cannot mix VariableIndex and ScalarAffineFunction, for instance. In practice, it means that Utilities.vectorize([x, f]) does not work; you should rather use Utilities.vectorize([1 * x, f]) instead to only have ScalarAffineFunction objects.","category":"page"},{"location":"moi/tutorials/manipulating_expressions/#Canonicalizing-functions","page":"Manipulating expressions","title":"Canonicalizing functions","text":"","category":"section"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"In more advanced use cases, you might need to ensure that a function is \"canonical.\" Functions are stored as an array of terms, but there is no check that these terms are redundant: a ScalarAffineFunction object might have two terms with the same variable, like x + x + 1. These terms could be merged without changing the semantics of the function: 2x + 1.","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"Working with these objects might be cumbersome. 
Canonicalization helps maintain redundancy to zero.","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"Utilities.is_canonical checks whether a function is already in its canonical form:","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"julia> MOI.Utilities.is_canonical(f + f) # (x + 2) + (x + 2) is stored as x + x + 4\nfalse","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"Utilities.canonical returns the equivalent canonical version of the function:","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"julia> MOI.Utilities.canonical(f + f) # Returns 2x + 4\n(4) + (2) MOI.VariableIndex(1)","category":"page"},{"location":"moi/tutorials/manipulating_expressions/#Exploring-functions","page":"Manipulating expressions","title":"Exploring functions","text":"","category":"section"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"At some point, you might need to dig into a function, for instance to map it into solver constructs.","category":"page"},{"location":"moi/tutorials/manipulating_expressions/#Vector-functions","page":"Manipulating expressions","title":"Vector functions","text":"","category":"section"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"Utilities.scalarize returns a vector of scalar functions from a vector function:","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"julia> MOI.Utilities.scalarize(g) # Returns a vector [f, 2 * f].\n2-element Vector{MathOptInterface.ScalarAffineFunction{Int64}}:\n (2) + (1) MOI.VariableIndex(1)\n (4) + (2) MOI.VariableIndex(1)","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"note: Note\nUtilities.eachscalar returns an iterator on the dimensions, which serves the same purpose as Utilities.scalarize.","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"output_dimension returns the number of dimensions of the output of a function:","category":"page"},{"location":"moi/tutorials/manipulating_expressions/","page":"Manipulating expressions","title":"Manipulating expressions","text":"julia> MOI.output_dimension(g)\n2","category":"page"},{"location":"packages/MosekTools/","page":"jump-dev/MosekTools.jl","title":"jump-dev/MosekTools.jl","text":"EditURL = \"https://github.com/jump-dev/MosekTools.jl/blob/v0.15.1/README.md\"","category":"page"},{"location":"packages/MosekTools/#MosekTools.jl","page":"jump-dev/MosekTools.jl","title":"MosekTools.jl","text":"","category":"section"},{"location":"packages/MosekTools/","page":"jump-dev/MosekTools.jl","title":"jump-dev/MosekTools.jl","text":"MosekTools.jl is the MathOptInterface.jl implementation for the MOSEK solver.","category":"page"},{"location":"packages/MosekTools/","page":"jump-dev/MosekTools.jl","title":"jump-dev/MosekTools.jl","text":"The low-level solver API 
for MOSEK is found in the package Mosek.jl.","category":"page"},{"location":"packages/MosekTools/#Affiliation","page":"jump-dev/MosekTools.jl","title":"Affiliation","text":"","category":"section"},{"location":"packages/MosekTools/","page":"jump-dev/MosekTools.jl","title":"jump-dev/MosekTools.jl","text":"MosekTools.jl is maintained by the JuMP community and is not officially supported by MOSEK. However, Mosek.jl is an officially supported product of MOSEK.","category":"page"},{"location":"packages/MosekTools/#License","page":"jump-dev/MosekTools.jl","title":"License","text":"","category":"section"},{"location":"packages/MosekTools/","page":"jump-dev/MosekTools.jl","title":"jump-dev/MosekTools.jl","text":"MosekTools.jl is licensed under the MIT License.","category":"page"},{"location":"packages/MosekTools/","page":"jump-dev/MosekTools.jl","title":"jump-dev/MosekTools.jl","text":"The underlying solver is a closed-source commercial product for which you must obtain a license.","category":"page"},{"location":"packages/MosekTools/#Installation","page":"jump-dev/MosekTools.jl","title":"Installation","text":"","category":"section"},{"location":"packages/MosekTools/","page":"jump-dev/MosekTools.jl","title":"jump-dev/MosekTools.jl","text":"The latest release of this package and the master branch are to be used with the latest release of Mosek.jl (which uses MOSEK v10).","category":"page"},{"location":"packages/MosekTools/","page":"jump-dev/MosekTools.jl","title":"jump-dev/MosekTools.jl","text":"To use MOSEK v9 (resp. v8), use the v0.12.x (resp. v0.7.x) releases of this package, and the mosekv9 (resp. mosekv8) branch and v1.2.x (resp. v0.9.x) releases of Mosek.jl.","category":"page"},{"location":"packages/MosekTools/","page":"jump-dev/MosekTools.jl","title":"jump-dev/MosekTools.jl","text":"See the following table for a summary:","category":"page"},{"location":"packages/MosekTools/","page":"jump-dev/MosekTools.jl","title":"jump-dev/MosekTools.jl","text":"MOSEK Mosek.jl MosekTools.jl release MosekTools.jl branch\nv10 v10 v0.13 master\nv9 v0.12 v0.12 mosekv9\nv8 v0.9 v0.7 mosekv8","category":"page"},{"location":"packages/MosekTools/#Use-with-JuMP","page":"jump-dev/MosekTools.jl","title":"Use with JuMP","text":"","category":"section"},{"location":"packages/MosekTools/","page":"jump-dev/MosekTools.jl","title":"jump-dev/MosekTools.jl","text":"using JuMP\nusing MosekTools\nmodel = Model(Mosek.Optimizer)\nset_attribute(model, \"QUIET\", true)\nset_attribute(model, \"INTPNT_CO_TOL_DFEAS\", 1e-7)","category":"page"},{"location":"packages/MosekTools/#Options","page":"jump-dev/MosekTools.jl","title":"Options","text":"","category":"section"},{"location":"packages/MosekTools/","page":"jump-dev/MosekTools.jl","title":"jump-dev/MosekTools.jl","text":"The parameter QUIET is a special parameter that when set to true disables all Mosek printing output.","category":"page"},{"location":"packages/MosekTools/","page":"jump-dev/MosekTools.jl","title":"jump-dev/MosekTools.jl","text":"All other parameters can be found in the Mosek documentation.","category":"page"},{"location":"packages/MosekTools/","page":"jump-dev/MosekTools.jl","title":"jump-dev/MosekTools.jl","text":"Note that the prefix MSK_IPAR_ (for integer parameters), MSK_DPAR_ (for floating point parameters) or MSK_SPAR_ (for string parameters) are optional. If they are not given, they are inferred from the type of the value. 
For example, in the example above, as 1e-7 is a floating point number, the parameters name used is MSK_DPAR_INTPNT_CO_TOL_DFEAS.","category":"page"},{"location":"developers/style/#Style-guide-and-design-principles","page":"Style Guide","title":"Style guide and design principles","text":"","category":"section"},{"location":"developers/style/#Style-guide","page":"Style Guide","title":"Style guide","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"This section describes the coding style rules that apply to JuMP code and that we recommend for JuMP models and surrounding Julia code. The motivations for a style guide include:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"conveying best practices for writing readable and maintainable code\nreducing the amount of time spent on bike-shedding by establishing basic naming and formatting conventions\nlowering the barrier for new contributors by codifying the existing practices (for example, you can be more confident your code will pass review if you follow the style guide)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"In some cases, the JuMP style guide diverges from the Julia style guide. All such cases will be explicitly noted and justified.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"The JuMP style guide adopts many recommendations from the Google style guides.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"info: Info\nThe style guide is always a work in progress, and not all JuMP code follows the rules. When modifying JuMP, please fix the style violations of the surrounding code (that is, leave the code tidier than when you started). If large changes are needed, consider separating them into another PR.","category":"page"},{"location":"developers/style/#JuliaFormatter","page":"Style Guide","title":"JuliaFormatter","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"JuMP uses JuliaFormatter.jl as an auto-formatting tool.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"We use the options contained in .JuliaFormatter.toml.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"To format code, cd to the JuMP directory, then run:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"] add JuliaFormatter@1\nusing JuliaFormatter\nformat(\"docs\")\nformat(\"src\")\nformat(\"test\")","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"info: Info\nA continuous integration check verifies that all PRs made to JuMP have passed the formatter.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"The following sections outline extra style guide points that are not fixed automatically by JuliaFormatter.","category":"page"},{"location":"developers/style/#Abstract-types-and-composition","page":"Style Guide","title":"Abstract types and composition","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Specifying types for method arguments is mostly optional in Julia. 
The benefit of abstract method arguments is that it enables functions and types from one package to be used with functions and types from another package via multiple dispatch.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"However, abstractly typed methods have two main drawbacks:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"It's possible to find out that you are working with unexpected types deep in the call chain, potentially leading to hard-to-diagnose MethodErrors.\nUntyped function arguments can lead to correctness problems if the user's choice of input type does not satisfy the assumptions made by the author of the function.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"As a motivating example, consider the following function:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"julia> function my_sum(x)\n y = 0.0\n for i in 1:length(x)\n y += x[i]\n end\n return y\n end\nmy_sum (generic function with 1 method)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"This function contains a number of implicit assumptions about the type of x:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"x supports 1-based getindex and implements length\nThe element type of x supports addition with 0.0, and then with the result of x + 0.0.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"info: Info\nAs a motivating example for the second point, VariableRef plus Float64 produces an AffExpr. Do not assume that +(::A, ::B) produces an instance of the type A or B.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"my_sum works as expected if the user passes in Vector{Float64}:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"julia> my_sum([1.0, 2.0, 3.0])\n6.0","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"but it doesn't respect input types, for example returning a Float64 if the user passes Vector{Int}:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"julia> my_sum([1, 2, 3])\n6.0","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"but it throws a MethodError if the user passes String:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"julia> my_sum(\"abc\")\nERROR: MethodError: no method matching +(::Float64, ::Char)\n[...]","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"This particular MethodError is hard to debug, particularly for new users, because it mentions +, Float64, and Char, none of which were called or passed by the user.","category":"page"},{"location":"developers/style/#Dealing-with-MethodErrors","page":"Style Guide","title":"Dealing with MethodErrors","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"This section diverges from the Julia style guide, as well as other common guides like SciML. 
The following suggestions are intended to provide a friendlier experience for novice Julia programmers, at the cost of limiting the power and flexibility of advanced Julia programmers.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Code should follow the MethodError principle:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"info: The MethodError principle\nA user should see a MethodError only for methods that they called directly.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Bad:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"_internal_function(x::Integer) = x + 1\n# The user sees a MethodError for _internal_function when calling\n# public_function(\"a string\"). This is not very helpful.\npublic_function(x) = _internal_function(x)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Good:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"_internal_function(x::Integer) = x + 1\n# The user sees a MethodError for public_function when calling\n# public_function(\"a string\"). This is easy to understand.\npublic_function(x::Integer) = _internal_function(x)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"If it is hard to provide an error message at the top of the call chain, then the following pattern is also ok:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"_internal_function(x::Integer) = x + 1\nfunction _internal_function(x)\n error(\n \"Internal error. This probably means that you called \" *\n \"`public_function()`s with the wrong type.\",\n )\nend\npublic_function(x) = _internal_function(x)","category":"page"},{"location":"developers/style/#Dealing-with-correctness","page":"Style Guide","title":"Dealing with correctness","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Dealing with correctness is harder, because Julia has no way of formally specifying interfaces that abstract types must implement. Instead, here are two options that you can use when writing and interacting with generic code:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Option 1: use concrete types and let users extend new methods.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"In this option, explicitly restrict input arguments to concrete types that are tested and have been validated for correctness. 
For example:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"julia> function my_sum_option_1(x::Vector{Float64})\n y = 0.0\n for i in 1:length(x)\n y += x[i]\n end\n return y\n end\nmy_sum_option_1 (generic function with 1 method)\n\njulia> my_sum_option_1([1.0, 2.0, 3.0])\n6.0","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Using concrete types satisfies the MethodError principle:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"julia> my_sum_option_1(\"abc\")\nERROR: MethodError: no method matching my_sum_option_1(::String)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"and it allows other types to be supported in future by defining new methods:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"julia> function my_sum_option_1(x::Array{T,N}) where {T<:Number,N}\n y = zero(T)\n for i in eachindex(x)\n y += x[i]\n end\n return y\n end\nmy_sum_option_1 (generic function with 2 methods)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Importantly, these methods do not have to be defined in the original package.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"info: Info\nSome usage of abstract types is okay. For example, in my_sum_option_1, we allowed the element type, T, to be a subtype of Number. This is fairly safe, but it still has an implicit assumption that T supports zero(T) and +(::T, ::T).","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Option 2: program defensively, and validate all assumptions.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"An alternative is to program defensively, and to rigorously document and validate all assumptions that the code makes. In particular:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"All assumptions on abstract types that aren't guaranteed by the definition of the abstract type (for example, optional methods without a fallback) should be documented.\nIf practical, the assumptions should be checked in code, and informative error messages should be provided to the user if the assumptions are not met. 
In general, these checks may be expensive, so you should prefer to do this once, at the highest level of the call-chain.\nTests should cover for a range of corner cases and argument types.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"For example:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"\"\"\"\n test_my_sum_defensive_assumptions(x::AbstractArray{T}) where {T}\n\nTest the assumptions made by `my_sum_defensive`.\n\"\"\"\nfunction test_my_sum_defensive_assumptions(x::AbstractArray{T}) where {T}\n try\n # Some types may not define zero.\n @assert zero(T) isa T\n # Check iteration supported\n @assert iterate(x) isa Union{Nothing,Tuple{T,Int}}\n # Check that + is defined\n @assert +(zero(T), zero(T)) isa Any\n catch err\n error(\n \"Unable to call my_sum_defensive(::$(typeof(x))) because \" *\n \"it failed an internal assumption\",\n )\n end\n return\nend\n\n\"\"\"\n my_sum_defensive(x::AbstractArray{T}) where {T}\n\nReturn the sum of the elements in the abstract array `x`.\n\n## Assumptions\n\nThis function makes the following assumptions:\n\n * That `zero(T)` is defined\n * That `x` supports the iteration interface\n * That `+(::T, ::T)` is defined\n\"\"\"\nfunction my_sum_defensive(x::AbstractArray{T}) where {T}\n test_my_sum_defensive_assumptions(x)\n y = zero(T)\n for xi in x\n y += xi\n end\n return y\nend\n\n# output\n\nmy_sum_defensive","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"This function works on Vector{Float64}:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"julia> my_sum_defensive([1.0, 2.0, 3.0])\n6.0","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"as well as Matrix{Rational{Int}}:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"julia> my_sum_defensive([(1//2) + (4//3)im; (6//5) + (7//11)im])\n17//10 + 65//33*im","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"and it throws an error when the assumptions aren't met:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"julia> my_sum_defensive(['a', 'b', 'c'])\nERROR: Unable to call my_sum_defensive(::Vector{Char}) because it failed an internal assumption\n[...]","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"As an alternative, you may choose not to call test_my_sum_defensive_assumptions within my_sum_defensive, and instead ask users of my_sum_defensive to call it in their tests.","category":"page"},{"location":"developers/style/#Juxtaposed-multiplication","page":"Style Guide","title":"Juxtaposed multiplication","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Only use juxtaposed multiplication when the right-hand side is a symbol.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Good:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"2x # Acceptable if there are space constraints.\n2 * x # This is preferred if space is not an issue.\n2 * (x + 1)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style 
Guide","text":"Bad:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"2(x + 1)","category":"page"},{"location":"developers/style/#Empty-vectors","page":"Style Guide","title":"Empty vectors","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"For a type T, T[] and Vector{T}() are equivalent ways to create an empty vector with element type T. Prefer T[] because it is more concise.","category":"page"},{"location":"developers/style/#Comments","page":"Style Guide","title":"Comments","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"For non-native speakers and for general clarity, comments in code must be proper English sentences with appropriate punctuation.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Good:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"# This is a comment demonstrating a good comment.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Bad:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"# a bad comment","category":"page"},{"location":"developers/style/#JuMP-macro-syntax","page":"Style Guide","title":"JuMP macro syntax","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"For consistency, always use parentheses.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Good:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"@variable(model, x >= 0)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Bad:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"@variable model x >= 0","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"For consistency, always use constant * variable as opposed to variable * constant. 
This makes it easier to read models in ambiguous cases like a * x.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Good:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"a = 4\n@constraint(model, 3 * x <= 1)\n@constraint(model, a * x <= 1)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Bad:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"a = 4\n@constraint(model, x * 3 <= 1)\n@constraint(model, x * a <= 1)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"In order to reduce boilerplate code, prefer the plural form of macros over lots of repeated calls to singular forms.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Good:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"@variables(model, begin\n x >= 0\n y >= 1\n z <= 2\nend)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Bad:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"@variable(model, x >= 0)\n@variable(model, y >= 1)\n@variable(model, z <= 2)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"An exception is made for calls with many keyword arguments, since these need to be enclosed in parentheses in order to parse properly.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Acceptable:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"@variable(model, x >= 0, start = 0.0, base_name = \"my_x\")\n@variable(model, y >= 1, start = 2.0)\n@variable(model, z <= 2, start = -1.0)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Also acceptable:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"@variables(model, begin\n x >= 0, (start = 0.0, base_name = \"my_x\")\n y >= 1, (start = 2.0)\n z <= 2, (start = -1.0)\nend)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"While we always use in for for-loops, it is acceptable to use = in the container declarations of JuMP macros.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Okay:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"@variable(model, x[i=1:3])","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Also okay:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"@variable(model, x[i in 1:3])","category":"page"},{"location":"developers/style/#Naming","page":"Style Guide","title":"Naming","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"module SomeModule end\nfunction some_function end\nconst SOME_CONSTANT = ...\nstruct SomeStruct\n some_field::SomeType\nend\n@enum SomeEnum ENUM_VALUE_A ENUM_VALUE_B\nsome_local_variable = ...\nsome_file.jl # Except for 
ModuleName.jl.","category":"page"},{"location":"developers/style/#Exported-and-non-exported-names","page":"Style Guide","title":"Exported and non-exported names","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Begin private module level functions and constants with an underscore. All other objects in the scope of a module should be exported. (See JuMP.jl for an example of how to do this.)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Names beginning with an underscore should only be used for distinguishing between exported (public) and non-exported (private) objects. Therefore, never begin the name of a local variable with an underscore.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"module MyModule\n\nexport public_function, PUBLIC_CONSTANT\n\nfunction _private_function()\n local_variable = 1\n return\nend\n\nfunction public_function end\n\nconst _PRIVATE_CONSTANT = 3.14159\nconst PUBLIC_CONSTANT = 1.41421\n\nend","category":"page"},{"location":"developers/style/#Use-of-underscores-within-names","page":"Style Guide","title":"Use of underscores within names","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"The Julia style guide recommends avoiding underscores \"when readable,\" for example, haskey, isequal, remotecall, and remotecall_fetch. This convention creates the potential for unnecessary bikeshedding and also forces the user to recall the presence/absence of an underscore, for example, \"was that argument named basename or base_name?\". For consistency, always use underscores in variable names and function names to separate words.","category":"page"},{"location":"developers/style/#Use-of-!","page":"Style Guide","title":"Use of !","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Julia has a convention of appending ! to a function name if the function modifies its arguments. We recommend to:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Omit ! when the name itself makes it clear that modification is taking place, for example, add_constraint and set_name. We depart from the Julia style guide because ! does not provide a reader with any additional information in this case, and adherence to this convention is not uniform even in base Julia itself (consider Base.println and Base.finalize).\nUse ! in all other cases. In particular it can be used to distinguish between modifying and non-modifying variants of the same function like scale and scale!.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Note that ! is not a self-documenting feature because it is still ambiguous which arguments are modified when multiple arguments are present. 
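As an illustrative sketch (the pair scale and scale! echoes the example above; the signatures and bodies here are hypothetical):\n\n\"\"\"\n scale!(x::Vector{Float64}, c::Float64)\n\nScale `x` in-place by the constant `c` and return it. Only `x` is modified.\n\"\"\"\nfunction scale!(x::Vector{Float64}, c::Float64)\n for i in eachindex(x)\n x[i] *= c\n end\n return x\nend\n\n\"\"\"\n scale(x::Vector{Float64}, c::Float64)\n\nReturn a new vector equal to `x` scaled by the constant `c`. No argument is modified.\n\"\"\"\nscale(x::Vector{Float64}, c::Float64) = scale!(copy(x), c)\n\n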
Be sure to document which arguments are modified in the method's docstring.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"See also the Julia style guide recommendations for ordering of function arguments.","category":"page"},{"location":"developers/style/#Abbreviations","page":"Style Guide","title":"Abbreviations","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Abbreviate names to make the code more readable, not to save typing. Don't arbitrarily delete letters from a word to abbreviate it (for example, indx). Use abbreviations consistently within a body of code (for example, do not mix con and constr, idx and indx).","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Common abbreviations:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"num for number\ncon for constraint","category":"page"},{"location":"developers/style/#No-one-letter-variable-names","page":"Style Guide","title":"No one-letter variable names","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Where possible, avoid one-letter variable names.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Use model = Model() instead of m = Model()","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Exceptions are made for indices in loops.","category":"page"},{"location":"developers/style/#@enum-vs.-Symbol","page":"Style Guide","title":"@enum vs. Symbol","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"The @enum macro lets you define types with a finite number of values that are explicitly enumerated (like enum in C/C++). Symbols are lightweight strings that are used to represent identifiers in Julia (for example, :x).","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"@enum provides type safety and can have docstrings attached to explain the possible values. Use @enums when applicable, for example, for reporting statuses. Use strings to provide long-form additional information like error messages.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Use of Symbol should typically be reserved for identifiers, for example, for lookup in the JuMP model (model[:my_variable]).","category":"page"},{"location":"developers/style/#using-vs.-import","page":"Style Guide","title":"using vs. import","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"using ModuleName brings all symbols exported by the module ModuleName into scope, while import ModuleName brings only the module itself into scope. (See the Julia manual) for examples and more details.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"For the same reason that from import * is not recommended in python (PEP 8), avoid using ModuleName except in throw-away scripts or at the REPL. 
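As a short illustration (JuMP stands in here for an arbitrary module):\n\n# Good: the module is in scope, and the origin of each symbol stays explicit.\nimport JuMP\nmodel = JuMP.Model()\n\n# Also good: brings only the named symbols into scope.\nusing JuMP: Model, @variable\n\n# Avoid outside scripts and the REPL: brings every exported symbol into scope.\nusing JuMP\n\n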
The using statement makes it harder to track where symbols come from and exposes the code to ambiguities when two modules export the same symbol.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Prefer using ModuleName: x, p to import ModuleName.x, ModuleName.p and import MyModule: x, p because the import versions allow method extension without qualifying with the module name.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Similarly, using ModuleName: ModuleName is an acceptable substitute for import ModuleName, because it does not bring all symbols exported by ModuleName into scope. However, we prefer import ModuleName for consistency.","category":"page"},{"location":"developers/style/#Documentation","page":"Style Guide","title":"Documentation","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"This section describes the writing style that should be used when writing documentation for JuMP (and supporting packages).","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"We can recommend the documentation style guides by Divio, Google, and Write the Docs as general reading for those writing documentation. This guide delegates a thorough handling of the topic to those guides and instead elaborates on the points more specific to Julia and documentation that use Documenter.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Be concise\nUse lists instead of long sentences\nUse numbered lists when describing a sequence, for example, (1) do X, (2) then Y\nUse bullet points when the items are not ordered\nExample code should be covered by doctests\nWhen a word is a Julia symbol and not an English word, enclose it with backticks. In addition, if it has a docstring in this doc add a link using @ref. If it is a plural, add the \"s\" after the closing backtick. For example,\n[`VariableRef`](@ref)s\nUse @meta blocks for TODOs and other comments that shouldn't be visible to readers. For example,\n```@meta\n# TODO: Mention also X, Y, and Z.\n```","category":"page"},{"location":"developers/style/#Docstrings","page":"Style Guide","title":"Docstrings","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Every exported object needs a docstring\nAll examples in docstrings should be jldoctests\nAlways use complete English sentences with proper punctuation\nDo not terminate lists with punctuation (for example, as in this doc)","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Here is an example:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"\"\"\"\n signature(args; kwargs...)\n\nShort sentence describing the function.\n\nOptional: add a slightly longer paragraph describing the function.\n\n## Notes\n\n - List any notes that the user should be aware of\n\n## Example\n\n```jldoctest\njulia> 1 + 1\n2\n```\n\"\"\"","category":"page"},{"location":"developers/style/#Testing","page":"Style Guide","title":"Testing","text":"","category":"section"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Use a module to encapsulate tests, and structure all tests as functions. 
This avoids leaking local variables between tests.","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Here is a basic skeleton:","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"module TestPkg\n\nusing Test\n\nfunction runtests()\n for name in names(@__MODULE__; all = true)\n if startswith(\"$(name)\", \"test_\")\n @testset \"$(name)\" begin\n getfield(@__MODULE__, name)()\n end\n end\n end\n return\nend\n\n_helper_function() = 2\n\nfunction test_addition()\n @test 1 + 1 == _helper_function()\n return\nend\n\nend # module TestPkg\n\nTestPkg.runtests()","category":"page"},{"location":"developers/style/","page":"Style Guide","title":"Style Guide","text":"Break the tests into multiple files, with one module per file, so that subsets of the codebase can be tested by calling include with the relevant file.","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"EditURL = \"https://github.com/jump-dev/MathOptInterface.jl/blob/v1.34.0/docs/src/submodules/Bridges/implementation.md\"","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"CurrentModule = MathOptInterface\nDocTestSetup = quote\n import MathOptInterface as MOI\nend\nDocTestFilters = [r\"MathOptInterface|MOI\"]","category":"page"},{"location":"moi/submodules/Bridges/implementation/#Implementing-a-bridge","page":"Implementation","title":"Implementing a bridge","text":"","category":"section"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"The easiest way to implement a bridge is to follow an existing example. There are three locations of bridges in the source code:","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"Constraint bridges are stored in src/Bridges/Constraint/bridges\nObjective bridges are stored in src/Bridges/Objective/bridges\nVariable bridges are stored in src/Bridges/Variable/bridges","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"The Implementing a constraint bridge tutorial has a more detailed guide on what is required to implement a bridge.","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"When opening a pull request that adds a new bridge, use the checklist Adding a new bridge.","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"If you need help or advice, please contact the Developer Chatroom.","category":"page"},{"location":"moi/submodules/Bridges/implementation/#SetMap-bridges","page":"Implementation","title":"SetMap bridges","text":"","category":"section"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"For constraint and variable bridges, a common reformulation is that f(x) in F is reformulated to g(x) in G. 
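For example, the GreaterToLessBridge used in the Testing section below reformulates f(x) in GreaterThan(l) into -f(x) in LessThan(-l); a minimal sketch of the two maps involved (the helper names below are illustrative and are not the actual bridge API, which is described in the docstrings mentioned next) is:\n\n# Map the set: GreaterThan(l) becomes LessThan(-l).\nmap_set_sketch(set::MOI.GreaterThan) = MOI.LessThan(-set.lower)\n\n# Map the function: f(x) becomes -f(x).\nmap_function_sketch(f::MOI.ScalarAffineFunction{Float64}) = MOI.Utilities.operate(-, Float64, f)\n\n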
In this case, no additional variables and constraints are added, and the bridge needs only a way to map between the functions f and g and the sets F and G.","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"To implement a bridge of this form, subtype the abstract type Bridges.Constraint.SetMapBridge or Bridges.Variable.SetMapBridge and implement the API described in the docstring of each type.","category":"page"},{"location":"moi/submodules/Bridges/implementation/#final_touch","page":"Implementation","title":"final_touch","text":"","category":"section"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"Some bridges require information from other parts of the model. One set of examples is the various combinatorial ToMILP bridges, such as Bridges.Constraint.SOS1ToMILPBridge, which require knowledge of the variable bounds.","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"Bridges requiring information from other parts of the model should implement Bridges.final_touch and Bridges.needs_final_touch.","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"During the bridge's construction, store the function and set and make no changes to the underlying model. Then, in Bridges.final_touch, query the additional information and add the reformulated problem to the model.","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"When implementing, you must consider that:","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"Bridges.final_touch may be called multiple times, so your reformulation should be applied only if necessary. Sometimes the additional data will be the same, and sometimes it may be different.\nWe do not currently support final_touch bridges that introduce constraints which also require a final_touch bridge. Therefore, you should implement final_touch only if necessary, and we recommend that you contact the Developer Chatroom for advice before doing so.","category":"page"},{"location":"moi/submodules/Bridges/implementation/#Testing","page":"Implementation","title":"Testing","text":"","category":"section"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"Use the Bridges.runtests function to test a bridge. 
It takes three arguments: the type of the bridge, the input model as a string, and the output model as a string.","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"Here is an example:","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"julia> MOI.Bridges.runtests(\n MOI.Bridges.Constraint.GreaterToLessBridge,\n \"\"\"\n variables: x\n x >= 1.0\n \"\"\",\n \"\"\"\n variables: x\n -1.0 * x <= -1.0\n \"\"\",\n )","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"There are a number of other useful keyword arguments.","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"eltype can be used to specify the element type of the model (and bridge). It defaults to Float64.\nvariable_start and constraint_start are used as the values to set the VariablePrimalStart and ConstraintPrimalStart attributes to. They default to 1.2. If you use a different eltype, you must set appropriate starting values of the same type. The default 1.2 was chosen to minimize the risk that the starting point is undefined, which could happen for common situations like 0.0 and 1.0. The tests associated with the starting values do not necessarily check for correctness, only that they can be set and get to produce the same result.\nprint_inner_model can be used to print the reformulated output model from the bridge. This is especially helpful during debugging to see what the bridge is doing, and to spot mistakes. It defaults to false.","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"Here is an example:","category":"page"},{"location":"moi/submodules/Bridges/implementation/","page":"Implementation","title":"Implementation","text":"julia> MOI.Bridges.runtests(\n MOI.Bridges.Constraint.GreaterToLessBridge,\n \"\"\"\n variables: x\n x >= 1\n \"\"\",\n \"\"\"\n variables: x\n ::Int: -1 * x <= -1\n \"\"\";\n eltype = Int,\n print_inner_model = true,\n variable_start = 2,\n constraint_start = 2,\n )\nFeasibility\n\nSubject to:\n\nScalarAffineFunction{Int64}-in-LessThan{Int64}\n (0) - (1) x <= (-1)","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"EditURL = \"https://github.com/jump-dev/MathOptInterface.jl/blob/v1.34.0/docs/src/submodules/Test/overview.md\"","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"CurrentModule = MathOptInterface\nDocTestSetup = quote\n import MathOptInterface as MOI\nend\nDocTestFilters = [r\"MathOptInterface|MOI\"]","category":"page"},{"location":"moi/submodules/Test/overview/#test_module","page":"Overview","title":"The Test submodule","text":"","category":"section"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"The Test submodule provides tools to help solvers implement unit tests in order to ensure they implement the MathOptInterface API correctly, and to check for solver-correctness.","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"We use a centralized repository of tests, so that if we find a bug in one solver, instead of adding a test to that particular repository, we add it here so that all solvers can 
benefit.","category":"page"},{"location":"moi/submodules/Test/overview/#How-to-test-a-solver","page":"Overview","title":"How to test a solver","text":"","category":"section"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"The skeleton below can be used for the wrapper test file of a solver named FooBar.","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"# ============================ /test/MOI_wrapper.jl ============================\nmodule TestFooBar\n\nimport FooBar\nusing Test\n\nimport MathOptInterface as MOI\n\nconst OPTIMIZER = MOI.instantiate(\n MOI.OptimizerWithAttributes(FooBar.Optimizer, MOI.Silent() => true),\n)\n\nconst BRIDGED = MOI.instantiate(\n MOI.OptimizerWithAttributes(FooBar.Optimizer, MOI.Silent() => true),\n with_bridge_type = Float64,\n)\n\n# See the docstring of MOI.Test.Config for other arguments.\nconst CONFIG = MOI.Test.Config(\n # Modify tolerances as necessary.\n atol = 1e-6,\n rtol = 1e-6,\n # Use MOI.LOCALLY_SOLVED for local solvers.\n optimal_status = MOI.OPTIMAL,\n # Pass attributes or MOI functions to `exclude` to skip tests that\n # rely on this functionality.\n exclude = Any[MOI.VariableName, MOI.delete],\n)\n\n\"\"\"\n runtests()\n\nThis function runs all functions in this module starting with `test_`.\n\"\"\"\nfunction runtests()\n for name in names(@__MODULE__; all = true)\n if startswith(\"$(name)\", \"test_\")\n @testset \"$(name)\" begin\n getfield(@__MODULE__, name)()\n end\n end\n end\nend\n\n\"\"\"\n test_runtests()\n\nThis function runs all the tests in MathOptInterface.Test.\n\nPass arguments to `exclude` to skip tests for functionality that is not\nimplemented or that your solver doesn't support.\n\"\"\"\nfunction test_runtests()\n MOI.Test.runtests(\n BRIDGED,\n CONFIG,\n exclude = [\n \"test_attribute_NumberOfThreads\",\n \"test_quadratic_\",\n ],\n # This argument is useful to prevent tests from failing on future\n # releases of MOI that add new tests. Don't let this number get too far\n # behind the current MOI release though. You should periodically check\n # for new tests to fix bugs and implement new features.\n exclude_tests_after = v\"0.10.5\",\n )\n return\nend\n\n\"\"\"\n test_SolverName()\n\nYou can also write new tests for solver-specific functionality. Write each new\ntest as a function with a name beginning with `test_`.\n\"\"\"\nfunction test_SolverName()\n @test MOI.get(FooBar.Optimizer(), MOI.SolverName()) == \"FooBar\"\n return\nend\n\nend # module TestFooBar\n\n# This line at the end of the file runs all the tests!\nTestFooBar.runtests()","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"Then modify your runtests.jl file to include the MOI_wrapper.jl file:","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"# ============================ /test/runtests.jl ============================\n\nusing Test\n\n@testset \"MOI\" begin\n include(\"MOI_wrapper.jl\")\nend","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"info: Info\nThe optimizer BRIDGED constructed with instantiate automatically bridges constraints that are not supported by OPTIMIZER using the bridges listed in Bridges. 
It is recommended for an implementation of MOI to only support constraints that are natively supported by the solver and let bridges transform the constraint to the appropriate form. For this reason it is expected that tests may not pass if OPTIMIZER is used instead of BRIDGED.","category":"page"},{"location":"moi/submodules/Test/overview/#How-to-debug-a-failing-test","page":"Overview","title":"How to debug a failing test","text":"","category":"section"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"When writing a solver, it's likely that you will initially fail many tests. Some failures will be bugs, but other failures you may choose to exclude.","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"There are two ways to exclude tests:","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"Exclude tests whose names contain a string using:\nMOI.Test.runtests(\n model,\n config;\n exclude = String[\"test_to_exclude\", \"test_conic_\"],\n)\nThis will exclude tests whose name contains either of the two strings provided.\nExclude tests which rely on specific functionality using:\nMOI.Test.Config(exclude = Any[MOI.VariableName, MOI.optimize!])\nThis will exclude tests which use the MOI.VariableName attribute, or which call MOI.optimize!.","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"Each test that fails can be independently called as:","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"model = FooBar.Optimizer()\nconfig = MOI.Test.Config()\nMOI.empty!(model)\nMOI.Test.test_category_name_that_failed(model, config)","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"You can look-up the source code of the test that failed by searching for it in the src/Test/test_category.jl file.","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"tip: Tip\nEach test function also has a docstring that explains what the test is for. Use ? MOI.Test.test_category_name_that_failed from the REPL to read it.","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"Periodically, you should re-run excluded tests to see if they now pass. The easiest way to do this is to swap the exclude keyword argument of runtests to include. 
For example:","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"MOI.Test.runtests(\n model,\n config;\n exclude = String[\"test_to_exclude\", \"test_conic_\"],\n)","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"becomes","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"MOI.Test.runtests(\n model,\n config;\n include = String[\"test_to_exclude\", \"test_conic_\"],\n)","category":"page"},{"location":"moi/submodules/Test/overview/#How-to-add-a-test","page":"Overview","title":"How to add a test","text":"","category":"section"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"To detect bugs in solvers, we add new tests to MOI.Test.","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"As an example, ECOS errored when calling optimize! twice in a row. (See ECOS.jl PR #72.) We could add a test to ECOS.jl, but that would only stop us from re-introducing the bug to ECOS.jl in the future; it would not catch other solvers in the ecosystem with the same bug. Instead, if we add a test to MOI.Test, then all solvers will also check that they handle a double optimize call.","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"For this test, we care about correctness, rather than performance. Therefore, we don't expect solvers to efficiently decide that they have already solved the problem, only that calling optimize! twice doesn't throw an error or give the wrong answer.","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"Step 1","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"Install the MathOptInterface Julia package in dev mode:","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"julia> ]\n(@v1.6) pkg> dev MathOptInterface","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"Step 2","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"From here on, proceed with making the following changes in the ~/.julia/dev/MathOptInterface folder (or equivalent dev path on your machine).","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"Step 3","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"Since the double-optimize error involves solving an optimization problem, add a new test to src/Test/test_solve.jl:","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"\"\"\"\n test_unit_optimize!_twice(model::MOI.ModelLike, config::Config)\n\nTest that calling `MOI.optimize!` twice does not error.\n\nThis problem was first detected in ECOS.jl PR#72:\nhttps://github.com/jump-dev/ECOS.jl/pull/72\n\"\"\"\nfunction test_unit_optimize!_twice(\n model::MOI.ModelLike,\n config::Config{T},\n) where {T}\n # Use the `@requires` macro to check conditions that the test function\n # requires to run. 
Models failing this `@requires` check will silently skip\n # the test.\n @requires MOI.supports_constraint(\n model,\n MOI.VariableIndex,\n MOI.GreaterThan{Float64},\n )\n @requires _supports(config, MOI.optimize!)\n # If needed, you can test that the model is empty at the start of the test.\n # You can assume that this will be the case for tests run via `runtests`.\n # Users calling tests individually need to call `MOI.empty!` themselves.\n @test MOI.is_empty(model)\n # Create a simple model. Try to make this as simple as possible so that the\n # majority of solvers can run the test.\n x = MOI.add_variable(model)\n MOI.add_constraint(model, x, MOI.GreaterThan(one(T)))\n MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)\n MOI.set(\n model,\n MOI.ObjectiveFunction{MOI.VariableIndex}(),\n x,\n )\n # The main component of the test: does calling `optimize!` twice error?\n MOI.optimize!(model)\n MOI.optimize!(model)\n # Check we have a solution.\n @test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL\n # There is a three-argument version of `Base.isapprox` for checking\n # approximate equality based on the tolerances defined in `config`:\n @test isapprox(MOI.get(model, MOI.VariablePrimal(), x), one(T), config)\n # For code style, these tests should always `return` `nothing`.\n return\nend","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"info: Info\nMake sure the function is agnostic to the number type T; don't assume it is a Float64-capable solver.","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"We also need to write a test for the test. Place this function immediately below the test you just wrote in the same file:","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"function setup_test(\n ::typeof(test_unit_optimize!_twice),\n model::MOI.Utilities.MockOptimizer,\n ::Config,\n)\n MOI.Utilities.set_mock_optimize!(\n model,\n (mock::MOI.Utilities.MockOptimizer) -> MOI.Utilities.mock_optimize!(\n mock,\n MOI.OPTIMAL,\n (MOI.FEASIBLE_POINT, [1.0]),\n ),\n )\n return\nend","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"Finally, you also need to implement Test.version_added. 
If we added this test when the latest released version of MOI was v0.10.5, define:","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"version_added(::typeof(test_unit_optimize!_twice)) = v\"0.10.6\"","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"Step 6","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"Commit the changes to git from ~/.julia/dev/MathOptInterface and submit the PR for review.","category":"page"},{"location":"moi/submodules/Test/overview/","page":"Overview","title":"Overview","text":"tip: Tip\nIf you need help writing a test, open an issue on GitHub, or ask the Developer Chatroom.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"EditURL = \"pdhg.jl\"","category":"page"},{"location":"tutorials/algorithms/pdhg/#Writing-a-solver-interface","page":"Writing a solver interface","title":"Writing a solver interface","text":"","category":"section"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"The purpose of this tutorial is to demonstrate how to implement a basic solver interface to MathOptInterface. As a motivating example, we implement the Primal Dual Hybrid Gradient (PDHG) method. PDHG is a first-order method that can solve convex optimization problems.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Google has a good introduction to the math behind PDLP, which is a variant of PDHG specialized for linear programs.","category":"page"},{"location":"tutorials/algorithms/pdhg/#Required-packages","page":"Writing a solver interface","title":"Required packages","text":"","category":"section"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"This tutorial requires the following packages:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"using JuMP\nimport LinearAlgebra\nimport MathOptInterface as MOI\nimport Printf\nimport SparseArrays","category":"page"},{"location":"tutorials/algorithms/pdhg/#Primal-Dual-Hybrid-Gradient","page":"Writing a solver interface","title":"Primal Dual Hybrid Gradient","text":"","category":"section"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"The following function is a pedagogical implementation of PDHG that solves the linear program:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"beginaligned\ntextmin c^top x \ntextsubject to Ax = b \n x ge 0\nendaligned","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Note that this implementation is intentionally kept simple. It is not robust nor efficient, and it does not incorporate the theoretical improvements in the PDLP paper. 
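For reference, the following restates, in equation form, exactly what the loop below computes with step sizes η and τ (it is a summary of the code, not additional material from the PDLP paper); the max is taken componentwise:

```math
\begin{aligned}
x^{k+1} &= \max\{0,\; x^{k} - \eta\,(A^\top y^{k} + c)\}, \\
y^{k+1} &= y^{k} + \tau\,(A\,(2x^{k+1} - x^{k}) - b).
\end{aligned}
```

The implementation below follows these two updates directly. 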
It does use two workspace vectors so that the body of the iteration loop is non-allocating.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"function solve_pdhg(\n A::SparseArrays.SparseMatrixCSC{Float64,Int},\n b::Vector{Float64},\n c::Vector{Float64};\n maximum_iterations::Int = 100_000,\n tol::Float64 = 1e-4,\n verbose::Bool = true,\n log_frequency::Int = 1_000,\n)\n printf(x::Float64) = Printf.@sprintf(\"% 1.6e\", x)\n printf(x::Int) = Printf.@sprintf(\"%6d\", x)\n m, n = size(A)\n η = τ = 1 / LinearAlgebra.norm(A) - 1e-6\n x, x_next, y, k, status = zeros(n), zeros(n), zeros(m), 0, MOI.OTHER_ERROR\n m_workspace, n_workspace = zeros(m), zeros(n)\n if verbose\n println(\n \" iter pobj dobj pfeas dfeas objfeas\",\n )\n end\n while status == MOI.OTHER_ERROR\n k += 1\n # =====================================================================\n # This block computes x_next = max.(0.0, x - η * (A' * y + c))\n LinearAlgebra.mul!(x_next, A', y)\n LinearAlgebra.axpby!(-η, c, -η, x_next)\n x_next .+= x\n x_next .= max.(0.0, x_next)\n # =====================================================================\n # This block computes y += τ * (A * (2 * x_next - x) - b)\n copy!(n_workspace, x_next)\n LinearAlgebra.axpby!(-1.0, x, 2.0, n_workspace)\n LinearAlgebra.mul!(y, A, n_workspace, τ, 1.0)\n LinearAlgebra.axpy!(-τ, b, y)\n # =====================================================================\n copy!(x, x_next)\n # =====================================================================\n # This block computes pfeas = LinearAlgebra.norm(A * x - b)\n LinearAlgebra.mul!(m_workspace, A, x)\n m_workspace .-= b\n pfeas = LinearAlgebra.norm(m_workspace)\n # =====================================================================\n # This block computes dfeas = LinearAlgebra.norm(min.(0.0, A' * y + c))\n LinearAlgebra.mul!(n_workspace, A', y)\n n_workspace .+= c\n n_workspace .= min.(0.0, n_workspace)\n dfeas = LinearAlgebra.norm(n_workspace)\n # =====================================================================\n objfeas = abs(LinearAlgebra.dot(c, x) + LinearAlgebra.dot(b, y))\n if pfeas <= tol && dfeas <= tol && objfeas <= tol\n status = MOI.OPTIMAL\n elseif k == maximum_iterations\n status = MOI.ITERATION_LIMIT\n end\n if verbose && (mod(k, log_frequency) == 0 || status != MOI.OTHER_ERROR)\n logs = printf.((k, c' * x, -b' * y, pfeas, dfeas, objfeas))\n println(join(logs, \" \"))\n end\n end\n return status, k, x, y\nend","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Here's an example:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"A = [0.0 -1.0 -1.0 0.0 0.0; 6.0 8.0 0.0 -1.0 0.0; 7.0 12.0 0.0 0.0 -1.0]\nb = [-3.0, 100.0, 120.0]\nc = [12.0, 20.0, 0.0, 0.0, 0.0]\nstatus, k, x, y = solve_pdhg(SparseArrays.sparse(A), b, c);\nnothing #hide","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"The termination status is:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"status","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"The solve took the following number of 
iterations:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"k","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"The primal solution is:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"x","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"The dual multipliers are:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"y","category":"page"},{"location":"tutorials/algorithms/pdhg/#The-MOI-interface","page":"Writing a solver interface","title":"The MOI interface","text":"","category":"section"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Converting a linear program from the modeler's form into the A, b, and c matrices of the standard form required by our implementation of PDHG is tedious and error-prone. This section walks through how to implement a basic interface to MathOptInterface, so that we can use our algorithm from JuMP.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"For a more comprehensive guide, see Implementing a solver interface.","category":"page"},{"location":"tutorials/algorithms/pdhg/#The-Optimizer-type","page":"Writing a solver interface","title":"The Optimizer type","text":"","category":"section"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Create an optimizer by subtyping MOI.AbstractOptimizer. By convention, the name of this type is Optimizer, and most optimizers are available as PackageName.Optimizer.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"The fields inside the optimizer are arbitrary. Store whatever is useful.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"\"\"\"\n Optimizer()\n\nCreate a new optimizer for PDHG.\n\"\"\"\nmutable struct Optimizer <: MOI.AbstractOptimizer\n # A mapping from variable to column\n x_to_col::Dict{MOI.VariableIndex,Int}\n # A mapping from constraint to rows\n ci_to_rows::Dict{\n MOI.ConstraintIndex{MOI.VectorAffineFunction{Float64},MOI.Zeros},\n Vector{Int},\n }\n # Information from solve_pdhg\n status::MOI.TerminationStatusCode\n iterations::Int\n x::Vector{Float64}\n y::Vector{Float64}\n # Other useful quantities\n solve_time::Float64\n obj_value::Float64\n\n function Optimizer()\n F = MOI.VectorAffineFunction{Float64}\n return new(\n Dict{MOI.VariableIndex,Int}(),\n Dict{MOI.ConstraintIndex{F,MOI.Zeros},Vector{Int}}(),\n MOI.OPTIMIZE_NOT_CALLED,\n 0,\n Float64[],\n Float64[],\n 0.0,\n 0.0,\n )\n end\nend","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Now that we have an Optimizer, we need to implement two methods: MOI.is_empty and MOI.empty!. 
These are called whenever MOI needs to ensure that the optimizer is in a clean state.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"function MOI.is_empty(model::Optimizer)\n # You might want to check every field, not just a few\n return isempty(model.x_to_col) && model.status == MOI.OPTIMIZE_NOT_CALLED\nend\n\nfunction MOI.empty!(model::Optimizer)\n empty!(model.x_to_col)\n empty!(model.ci_to_rows)\n model.status = MOI.OPTIMIZE_NOT_CALLED\n model.iterations = 0\n model.solve_time = 0.0\n model.obj_value = 0.0\n empty!(model.x)\n empty!(model.y)\n return\nend","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Next, we need to define what constraints the optimizer supports. Since our standard form was Ax = b, we support only Ax + b in 0, which is a MOI.VectorAffineFunction in MOI.Zeros constraint. Note that you might have expected Ax - b in 0. We'll address the difference in the sign of b in a few places later on.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"function MOI.supports_constraint(\n ::Optimizer,\n ::Type{MOI.VectorAffineFunction{Float64}},\n ::Type{MOI.Zeros},\n)\n return true\nend","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"By default, MOI assumes that it can add free variables. This isn't true for our standard form, because we support only x ge 0. Let's tell MOI that:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"MOI.supports_add_constrained_variables(::Optimizer, ::Type{MOI.Reals}) = false\n\nfunction MOI.supports_add_constrained_variables(\n ::Optimizer,\n ::Type{MOI.Nonnegatives},\n)\n return true\nend","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"The objective function that we support is MOI.ScalarAffineFunction:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"function MOI.supports(\n ::Optimizer,\n ::MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}},\n)\n return true\nend","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Finally, we'll implement MOI.SolverName so that MOI knows how to print the name of our optimizer:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"MOI.get(::Optimizer, ::MOI.SolverName) = \"PDHG\"","category":"page"},{"location":"tutorials/algorithms/pdhg/#GenericModel","page":"Writing a solver interface","title":"GenericModel","text":"","category":"section"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"The simplest way to solve a problem with your optimizer is to implement the method MOI.optimize!(dest::Optimizer, src::MOI.ModelLike), where src is an input model and dest is your empty optimizer.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"To 
implement this method you would need to query the variables and constraints in src and then convert these into the matrix data expected by solve_pdhg. Since matrix input is a common requirement of solvers, MOI includes utilities to simplify the process.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"The downside of the utilities is that they involve a highly parameterized type with a large number of possible configurations. The upside of the utilities is that, once set up, they require few lines of code to extract the problem matrices.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"First, we need to define the set of sets that our standard form supports. For PDHG, we support only Ax + b in {0}:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"MOI.Utilities.@product_of_sets(SetOfZeros, MOI.Zeros)","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Then, we define a MOI.Utilities.GenericModel. This is the highly parameterized type that can be customized.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"const CacheModel = MOI.Utilities.GenericModel{\n # The coefficient type is Float64\n Float64,\n # We use the default objective container\n MOI.Utilities.ObjectiveContainer{Float64},\n # We use the default variable container\n MOI.Utilities.VariablesContainer{Float64},\n # We use a Matrix of Constraints to represent `A * x + b in K`\n MOI.Utilities.MatrixOfConstraints{\n # The number type is Float64\n Float64,\n # The matrix type `A` is a sparse matrix\n MOI.Utilities.MutableSparseMatrixCSC{\n # ... with Float64 coefficients\n Float64,\n # ... Int64 row and column indices\n Int,\n # ... 
and it uses one-based indexing\n MOI.Utilities.OneBasedIndexing,\n },\n # The vector type `b` is a Julia `Vector`\n Vector{Float64},\n # The set type `K` is the SetOfZeros type we defined above\n SetOfZeros{Float64},\n },\n}","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"As one example of possible alternate configuration, if you were interfacing with a solver written in C that expected zero-based indices, you might use instead:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"MOI.Utilities.MutableSparseMatrixCSC{\n Cdouble,\n Cint,\n MOI.Utilities.ZeroBasedIndexing,\n}","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"tip: Tip\nThe best place to look at how to configure GenericModel is to find an existing solver with the same input standard form that you require.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"We need to make one modification to CacheModel to tell MOI that x in mathbbR_+ is equivalent to adding variables in MOI.GreaterThan:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"function MOI.add_constrained_variables(model::CacheModel, set::MOI.Nonnegatives)\n x = MOI.add_variables(model, MOI.dimension(set))\n MOI.add_constraint.(model, x, MOI.GreaterThan(0.0))\n ci = MOI.ConstraintIndex{MOI.VectorOfVariables,MOI.Nonnegatives}(x[1].value)\n return x, ci\nend","category":"page"},{"location":"tutorials/algorithms/pdhg/#The-optimize-method","page":"Writing a solver interface","title":"The optimize method","text":"","category":"section"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Now we define the most important method for our optimizer.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"function MOI.optimize!(dest::Optimizer, src::MOI.ModelLike)\n # In addition to the values returned by `solve_pdhg`, it may be useful to\n # record other attributes, such as the solve time.\n start_time = time()\n # Construct a cache to store our problem data:\n cache = CacheModel()\n # MOI includes a utility to copy an arbitrary `src` model into `cache`. 
The\n # return, `index_map`, is a mapping from indices in `src` to indices in\n # `dest`.\n index_map = MOI.copy_to(cache, src)\n # Now we can access the `A` matrix:\n A = convert(\n SparseArrays.SparseMatrixCSC{Float64,Int},\n cache.constraints.coefficients,\n )\n # and the b vector (note that MOI models Ax = b as Ax + b in {0}, so b\n # differs by -):\n b = -cache.constraints.constants\n # The `c` vector is more involved, because we need to account for the\n # objective sense:\n sense = ifelse(cache.objective.sense == MOI.MAX_SENSE, -1, 1)\n F = MOI.ScalarAffineFunction{Float64}\n obj = MOI.get(src, MOI.ObjectiveFunction{F}())\n c = zeros(size(A, 2))\n for term in obj.terms\n c[term.variable.value] += sense * term.coefficient\n end\n # Now we can solve the problem with PDHG and record the solution:\n dest.status, dest.iterations, dest.x, dest.y = solve_pdhg(A, b, c)\n # To help assign the values of the x and y vectors to the appropriate\n # variables and constraints, we need a map of the constraint indices to their\n # row in the `dest` matrix and a map of the variable indices to their\n # column in the `dest` matrix:\n F, S = MOI.VectorAffineFunction{Float64}, MOI.Zeros\n for src_ci in MOI.get(src, MOI.ListOfConstraintIndices{F,S}())\n dest.ci_to_rows[index_map[src_ci]] =\n MOI.Utilities.rows(cache.constraints.sets, index_map[src_ci])\n end\n for (i, src_x) in enumerate(MOI.get(src, MOI.ListOfVariableIndices()))\n dest.x_to_col[index_map[src_x]] = i\n end\n # We can now record two derived quantities: the primal objective value and\n # the solve time.\n dest.obj_value = obj.constant + sense * c' * dest.x\n dest.solve_time = time() - start_time\n # We need to return the index map, and `false`, to indicate to MOI that we\n # do not support incremental modification of the model.\n return index_map, false\nend","category":"page"},{"location":"tutorials/algorithms/pdhg/#Solutions","page":"Writing a solver interface","title":"Solutions","text":"","category":"section"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Now that we know how to solve a model, let's implement the required solution attributes.","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"First, we need to tell MOI how many solutions we found via MOI.ResultCount:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"function MOI.get(model::Optimizer, ::MOI.ResultCount)\n return model.status == MOI.OPTIMAL ? 
1 : 0\nend","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"and implement MOI.RawStatusString to provide a user-readable string that describes what happened:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"function MOI.get(model::Optimizer, ::MOI.RawStatusString)\n if model.status == MOI.OPTIMAL\n return \"found a primal-dual optimal solution (subject to tolerances)\"\n end\n return \"failed to solve\"\nend","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Then, we need to implement the three types of problem status: MOI.TerminationStatus, MOI.PrimalStatus and MOI.DualStatus:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"MOI.get(model::Optimizer, ::MOI.TerminationStatus) = model.status\n\nfunction MOI.get(model::Optimizer, attr::Union{MOI.PrimalStatus,MOI.DualStatus})\n if attr.result_index == 1 && model.status == MOI.OPTIMAL\n return MOI.FEASIBLE_POINT\n end\n return MOI.NO_SOLUTION\nend","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Now we can implement MOI.ObjectiveValue, MOI.VariablePrimal, and MOI.ConstraintDual:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"function MOI.get(model::Optimizer, attr::MOI.ObjectiveValue)\n MOI.check_result_index_bounds(model, attr)\n return model.obj_value\nend\n\nfunction MOI.get(\n model::Optimizer,\n attr::MOI.VariablePrimal,\n x::MOI.VariableIndex,\n)\n MOI.check_result_index_bounds(model, attr)\n return model.x[model.x_to_col[x]]\nend\n\nfunction MOI.get(\n model::Optimizer,\n attr::MOI.ConstraintDual,\n ci::MOI.ConstraintIndex{MOI.VectorAffineFunction{Float64},MOI.Zeros},\n)\n MOI.check_result_index_bounds(model, attr)\n # MOI models Ax = b as Ax + b in {0}, so the dual differs by -\n return -model.y[model.ci_to_rows[ci]]\nend","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Some other useful result quantities are MOI.SolveTimeSec and MOI.BarrierIterations:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"MOI.get(model::Optimizer, ::MOI.SolveTimeSec) = model.solve_time\nMOI.get(model::Optimizer, ::MOI.BarrierIterations) = model.iterations","category":"page"},{"location":"tutorials/algorithms/pdhg/#A-JuMP-example","page":"Writing a solver interface","title":"A JuMP example","text":"","category":"section"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Now we can solve an arbitrary linear program with JuMP. 
Here's the same standard form as before:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"model = Model(Optimizer)\n@variable(model, x[1:5] >= 0)\n@objective(model, Min, c' * x)\n@constraint(model, c3, A * x == b)\noptimize!(model)","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"solution_summary(model; verbose = true)","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"But we could also have written:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"model = Model(Optimizer)\n@variable(model, x >= 0)\n@variable(model, 0 <= y <= 3)\n@objective(model, Min, 12x + 20y)\n@constraint(model, c1, 6x + 8y >= 100)\n@constraint(model, c2, 7x + 12y >= 120)\noptimize!(model)","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"solution_summary(model; verbose = true)","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Other variations are also possible:","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"model = Model(Optimizer)\n@variable(model, x[1:5] >= 0)\n@objective(model, Max, -c' * x)\n@constraint(model, c4, A * x .== b)\noptimize!(model)","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"solution_summary(model; verbose = true)","category":"page"},{"location":"tutorials/algorithms/pdhg/","page":"Writing a solver interface","title":"Writing a solver interface","text":"Behind the scenes, JuMP and MathOptInterface reformulate the problem from the modeller's form into the standard form defined by our Optimizer.","category":"page"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"EditURL = \"https://github.com/jump-dev/MathOptInterface.jl/blob/v1.34.0/docs/src/submodules/Utilities/reference.md\"","category":"page"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"CurrentModule = MathOptInterface\nDocTestSetup = quote\n import MathOptInterface as MOI\nend\nDocTestFilters = [r\"MathOptInterface|MOI\"]","category":"page"},{"location":"moi/submodules/Utilities/reference/#Utilities.Model","page":"API Reference","title":"Utilities.Model","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.Model","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.Model","page":"API Reference","title":"MathOptInterface.Utilities.Model","text":"MOI.Utilities.Model{T}() where {T}\n\nAn implementation of ModelLike that supports all functions and sets defined in MOI. 
It is parameterized by the coefficient type.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> model = MOI.Utilities.Model{Float64}()\nMOIU.Model{Float64}\n├ ObjectiveSense: FEASIBILITY_SENSE\n├ ObjectiveFunctionType: MOI.ScalarAffineFunction{Float64}\n├ NumberOfVariables: 0\n└ NumberOfConstraints: 0\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#Utilities.UniversalFallback","page":"API Reference","title":"Utilities.UniversalFallback","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.UniversalFallback","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.UniversalFallback","page":"API Reference","title":"MathOptInterface.Utilities.UniversalFallback","text":"UniversalFallback\n\nThe UniversalFallback can be applied on a MOI.ModelLike model to create the model UniversalFallback(model) supporting any constraint and attribute. This allows to have a specialized implementation in model for performance critical constraints and attributes while still supporting other attributes with a small performance penalty. Note that model is unaware of constraints and attributes stored by UniversalFallback so this is not appropriate if model is an optimizer (for this reason, MOI.optimize! has not been implemented). In that case, optimizer bridges should be used instead.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#Utilities.@model","page":"API Reference","title":"Utilities.@model","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.@model\nUtilities.GenericModel\nUtilities.GenericOptimizer","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.@model","page":"API Reference","title":"MathOptInterface.Utilities.@model","text":"macro model(\n model_name,\n scalar_sets,\n typed_scalar_sets,\n vector_sets,\n typed_vector_sets,\n scalar_functions,\n typed_scalar_functions,\n vector_functions,\n typed_vector_functions,\n is_optimizer = false\n)\n\nCreates a type model_name implementing the MOI model interface and supporting all combinations of the provided functions and sets.\n\nEach typed_ scalar/vector sets/functions argument is a tuple of types. A type is \"typed\" if it has a coefficient {T} as the first type parameter.\n\nTuple syntax\n\nTo give no set/function, write (). 
To give one set or function X, write (X,).\n\nis_optimizer\n\nIf is_optimizer = true, the resulting struct is a of GenericOptimizer, which is a subtype of MOI.AbstractOptimizer, otherwise, it is a GenericModel, which is a subtype of MOI.ModelLike.\n\nVariableIndex\n\nThe function MOI.VariableIndex must not be given in scalar_functions.\nThe model supports MOI.VariableIndex-in-S constraints where S is MOI.EqualTo, MOI.GreaterThan, MOI.LessThan, MOI.Interval, MOI.Integer, MOI.ZeroOne, MOI.Semicontinuous or MOI.Semiinteger.\nThe sets supported with MOI.VariableIndex cannot be controlled from the macro; use UniversalFallback to support more sets.\n\nExample\n\nThe model describing a linear program would be:\n\n@model(\n LPModel, # model_name\n (), # untyped scalar sets\n (MOI.EqualTo, MOI.GreaterThan, MOI.LessThan, MOI.Interval), # typed scalar sets\n (MOI.Zeros, MOI.Nonnegatives, MOI.Nonpositives), # untyped vector sets\n (), # typed vector sets\n (), # untyped scalar functions\n (MOI.ScalarAffineFunction,), # typed scalar functions\n (MOI.VectorOfVariables,), # untyped vector functions\n (MOI.VectorAffineFunction,), # typed vector functions\n false, # is_optimizer\n)\n\n\n\n\n\n","category":"macro"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.GenericModel","page":"API Reference","title":"MathOptInterface.Utilities.GenericModel","text":"mutable struct GenericModel{T,O,V,C} <: AbstractModelLike{T}\n\nImplements a model supporting coefficients of type T and:\n\nAn objective function stored in .objective::O\nVariables and VariableIndex constraints stored in .variable_bounds::V\nF-in-S constraints (excluding VariableIndex constraints) stored in .constraints::C\n\nAll interactions take place via the MOI interface, so the types O, V, and C must implement the API as needed for their functionality.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.GenericOptimizer","page":"API Reference","title":"MathOptInterface.Utilities.GenericOptimizer","text":"mutable struct GenericOptimizer{T,O,V,C} <: AbstractOptimizer{T}\n\nImplements a model supporting coefficients of type T and:\n\nAn objective function stored in .objective::O\nVariables and VariableIndex constraints stored in .variable_bounds::V\nF-in-S constraints (excluding VariableIndex constraints) stored in .constraints::C\n\nAll interactions take place via the MOI interface, so the types O, V, and C must implement the API as needed for their functionality.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#.objective","page":"API Reference","title":".objective","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.ObjectiveContainer","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.ObjectiveContainer","page":"API Reference","title":"MathOptInterface.Utilities.ObjectiveContainer","text":"ObjectiveContainer{T}\n\nA helper struct to simplify the handling of objective functions in Utilities.Model.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#.variables","page":"API Reference","title":".variables","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API 
Reference","text":"Utilities.VariablesContainer\nUtilities.FreeVariables","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.VariablesContainer","page":"API Reference","title":"MathOptInterface.Utilities.VariablesContainer","text":"struct VariablesContainer{T} <: AbstractVectorBounds\n set_mask::Vector{UInt16}\n lower::Vector{T}\n upper::Vector{T}\nend\n\nA struct for storing variables and VariableIndex-related constraints. Used in MOI.Utilities.Model by default.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.FreeVariables","page":"API Reference","title":"MathOptInterface.Utilities.FreeVariables","text":"mutable struct FreeVariables <: MOI.ModelLike\n n::Int64\n FreeVariables() = new(0)\nend\n\nA struct for storing free variables that can be used as the variables field of GenericModel or GenericModel. It represents a model that does not support any constraint nor objective function.\n\nExample\n\nThe following model type represents a conic model in geometric form. As opposed to VariablesContainer, FreeVariables does not support constraint bounds so they are bridged into an affine constraint in the MOI.Nonnegatives cone as expected for the geometric conic form.\n\njulia> MOI.Utilities.@product_of_sets(\n Cones,\n MOI.Zeros,\n MOI.Nonnegatives,\n MOI.SecondOrderCone,\n MOI.PositiveSemidefiniteConeTriangle,\n);\n\njulia> const ConicModel{T} = MOI.Utilities.GenericOptimizer{\n T,\n MOI.Utilities.ObjectiveContainer{T},\n MOI.Utilities.FreeVariables,\n MOI.Utilities.MatrixOfConstraints{\n T,\n MOI.Utilities.MutableSparseMatrixCSC{\n T,\n Int,\n MOI.Utilities.OneBasedIndexing,\n },\n Vector{T},\n Cones{T},\n },\n};\n\njulia> model = MOI.instantiate(ConicModel{Float64}, with_bridge_type=Float64);\n\njulia> x = MOI.add_variable(model)\nMathOptInterface.VariableIndex(1)\n\njulia> c = MOI.add_constraint(model, x, MOI.GreaterThan(1.0))\nMathOptInterface.ConstraintIndex{MathOptInterface.VariableIndex, MathOptInterface.GreaterThan{Float64}}(1)\n\njulia> MOI.Bridges.is_bridged(model, c)\ntrue\n\njulia> bridge = MOI.Bridges.bridge(model, c)\nMathOptInterface.Bridges.Constraint.VectorizeBridge{Float64, MathOptInterface.VectorAffineFunction{Float64}, MathOptInterface.Nonnegatives, MathOptInterface.VariableIndex}(MathOptInterface.ConstraintIndex{MathOptInterface.VectorAffineFunction{Float64}, MathOptInterface.Nonnegatives}(1), 1.0)\n\njulia> bridge.vector_constraint\nMathOptInterface.ConstraintIndex{MathOptInterface.VectorAffineFunction{Float64}, MathOptInterface.Nonnegatives}(1)\n\njulia> MOI.Bridges.is_bridged(model, bridge.vector_constraint)\nfalse\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#.constraints","page":"API Reference","title":".constraints","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.VectorOfConstraints\nUtilities.StructOfConstraints\nUtilities.@struct_of_constraints_by_function_types\nUtilities.@struct_of_constraints_by_set_types\nUtilities.struct_of_constraint_code","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.VectorOfConstraints","page":"API Reference","title":"MathOptInterface.Utilities.VectorOfConstraints","text":"mutable struct VectorOfConstraints{\n F<:MOI.AbstractFunction,\n S<:MOI.AbstractSet,\n} <: MOI.ModelLike\n constraints::CleverDicts.CleverDict{\n MOI.ConstraintIndex{F,S},\n Tuple{F,S},\n 
typeof(CleverDicts.key_to_index),\n typeof(CleverDicts.index_to_key),\n }\nend\n\nA struct storing F-in-S constraints as a mapping between the constraint indices to the corresponding tuple of function and set.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.StructOfConstraints","page":"API Reference","title":"MathOptInterface.Utilities.StructOfConstraints","text":"abstract type StructOfConstraints <: MOI.ModelLike end\n\nA struct storing a subfields other structs storing constraints of different types.\n\nSee Utilities.@struct_of_constraints_by_function_types and Utilities.@struct_of_constraints_by_set_types.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.@struct_of_constraints_by_function_types","page":"API Reference","title":"MathOptInterface.Utilities.@struct_of_constraints_by_function_types","text":"Utilities.@struct_of_constraints_by_function_types(name, func_types...)\n\nGiven a vector of n function types (F1, F2,..., Fn) in func_types, defines a subtype of StructOfConstraints of name name and which type parameters {T, C1, C2, ..., Cn}. It contains n field where the ith field has type Ci and stores the constraints of function type Fi.\n\nThe expression Fi can also be a union in which case any constraint for which the function type is in the union is stored in the field with type Ci.\n\n\n\n\n\n","category":"macro"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.@struct_of_constraints_by_set_types","page":"API Reference","title":"MathOptInterface.Utilities.@struct_of_constraints_by_set_types","text":"Utilities.@struct_of_constraints_by_set_types(name, func_types...)\n\nGiven a vector of n set types (S1, S2,..., Sn) in func_types, defines a subtype of StructOfConstraints of name name and which type parameters {T, C1, C2, ..., Cn}. It contains n field where the ith field has type Ci and stores the constraints of set type Si. The expression Si can also be a union in which case any constraint for which the set type is in the union is stored in the field with type Ci. This can be useful if Ci is a MatrixOfConstraints in order to concatenate the coefficients of constraints of several different set types in the same matrix.\n\n\n\n\n\n","category":"macro"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.struct_of_constraint_code","page":"API Reference","title":"MathOptInterface.Utilities.struct_of_constraint_code","text":"struct_of_constraint_code(struct_name, types, field_types = nothing)\n\nGiven a vector of n Union{SymbolFun,_UnionSymbolFS{SymbolFun}} or Union{SymbolSet,_UnionSymbolFS{SymbolSet}} in types, defines a subtype of StructOfConstraints of name name and which type parameters {T, F1, F2, ..., Fn} if field_types is nothing and a {T} otherwise. It contains n field where the ith field has type Ci if field_types is nothing and type field_types[i] otherwise. If types is vector of Union{SymbolFun,_UnionSymbolFS{SymbolFun}} (resp. Union{SymbolSet,_UnionSymbolFS{SymbolSet}}) then the constraints of that function (resp. 
set) type are stored in the corresponding field.\n\nThis function is used by the macros @model, @struct_of_constraints_by_function_types and @struct_of_constraints_by_set_types.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#Caching-optimizer","page":"API Reference","title":"Caching optimizer","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.CachingOptimizer\nUtilities.attach_optimizer\nUtilities.reset_optimizer\nUtilities.drop_optimizer\nUtilities.state\nUtilities.mode","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.CachingOptimizer","page":"API Reference","title":"MathOptInterface.Utilities.CachingOptimizer","text":"CachingOptimizer\n\nCachingOptimizer is an intermediate layer that stores a cache of the model and links it with an optimizer. It supports incremental model construction and modification even when the optimizer doesn't.\n\nConstructors\n\n CachingOptimizer(cache::MOI.ModelLike, optimizer::AbstractOptimizer)\n\nCreates a CachingOptimizer in AUTOMATIC mode, with the optimizer optimizer.\n\nThe type of the optimizer returned is CachingOptimizer{typeof(optimizer), typeof(cache)} so it does not support the function reset_optimizer(::CachingOptimizer, new_optimizer) if the type of new_optimizer is different from the type of optimizer.\n\n CachingOptimizer(cache::MOI.ModelLike, mode::CachingOptimizerMode)\n\nCreates a CachingOptimizer in the NO_OPTIMIZER state and mode mode.\n\nThe type of the optimizer returned is CachingOptimizer{MOI.AbstractOptimizer,typeof(cache)} so it does support the function reset_optimizer(::CachingOptimizer, new_optimizer) if the type of new_optimizer is different from the type of optimizer.\n\nAbout the type\n\nStates\n\nA CachingOptimizer may be in one of three possible states (CachingOptimizerState):\n\nNO_OPTIMIZER: The CachingOptimizer does not have any optimizer.\nEMPTY_OPTIMIZER: The CachingOptimizer has an empty optimizer. The optimizer is not synchronized with the cached model.\nATTACHED_OPTIMIZER: The CachingOptimizer has an optimizer, and it is synchronized with the cached model.\n\nModes\n\nA CachingOptimizer has two modes of operation (CachingOptimizerMode):\n\nMANUAL: The only methods that change the state of the CachingOptimizer are Utilities.reset_optimizer, Utilities.drop_optimizer, and Utilities.attach_optimizer. Attempting to perform an operation in the incorrect state results in an error.\nAUTOMATIC: The CachingOptimizer changes its state when necessary. For example, optimize! will automatically call attach_optimizer (an optimizer must have been previously set). Attempting to add a constraint or perform a modification not supported by the optimizer results in a drop to EMPTY_OPTIMIZER mode.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.attach_optimizer","page":"API Reference","title":"MathOptInterface.Utilities.attach_optimizer","text":"attach_optimizer(model::CachingOptimizer)\n\nAttaches the optimizer to model, copying all model data into it. Can be called only from the EMPTY_OPTIMIZER state. 
If the copy succeeds, the CachingOptimizer will be in state ATTACHED_OPTIMIZER after the call, otherwise an error is thrown; see MOI.copy_to for more details on which errors can be thrown.\n\n\n\n\n\nMOIU.attach_optimizer(model::GenericModel)\n\nCall MOIU.attach_optimizer on the backend of model.\n\nCannot be called in direct mode.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.reset_optimizer","page":"API Reference","title":"MathOptInterface.Utilities.reset_optimizer","text":"reset_optimizer(m::CachingOptimizer, optimizer::MOI.AbstractOptimizer)\n\nSets or resets m to have the given empty optimizer optimizer.\n\nCan be called from any state. An assertion error will be thrown if optimizer is not empty.\n\nThe CachingOptimizer m will be in state EMPTY_OPTIMIZER after the call.\n\n\n\n\n\nreset_optimizer(m::CachingOptimizer)\n\nDetaches and empties the current optimizer. Can be called from ATTACHED_OPTIMIZER or EMPTY_OPTIMIZER state. The CachingOptimizer will be in state EMPTY_OPTIMIZER after the call.\n\n\n\n\n\nMOIU.reset_optimizer(model::GenericModel, optimizer::MOI.AbstractOptimizer)\n\nCall MOIU.reset_optimizer on the backend of model.\n\nCannot be called in direct mode.\n\n\n\n\n\nMOIU.reset_optimizer(model::GenericModel)\n\nCall MOIU.reset_optimizer on the backend of model.\n\nCannot be called in direct mode.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.drop_optimizer","page":"API Reference","title":"MathOptInterface.Utilities.drop_optimizer","text":"drop_optimizer(m::CachingOptimizer)\n\nDrops the optimizer, if one is present. Can be called from any state. The CachingOptimizer will be in state NO_OPTIMIZER after the call.\n\n\n\n\n\nMOIU.drop_optimizer(model::GenericModel)\n\nCall MOIU.drop_optimizer on the backend of model.\n\nCannot be called in direct mode.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.state","page":"API Reference","title":"MathOptInterface.Utilities.state","text":"state(m::CachingOptimizer)::CachingOptimizerState\n\nReturns the state of the CachingOptimizer m. See Utilities.CachingOptimizer.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.mode","page":"API Reference","title":"MathOptInterface.Utilities.mode","text":"mode(m::CachingOptimizer)::CachingOptimizerMode\n\nReturns the operating mode of the CachingOptimizer m. See Utilities.CachingOptimizer.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#Mock-optimizer","page":"API Reference","title":"Mock optimizer","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.MockOptimizer","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.MockOptimizer","page":"API Reference","title":"MathOptInterface.Utilities.MockOptimizer","text":"MockOptimizer\n\nMockOptimizer is a fake optimizer especially useful for testing. 
Its main feature is that it can store the values that should be returned for each attribute.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#Printing","page":"API Reference","title":"Printing","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.latex_formulation","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.latex_formulation","page":"API Reference","title":"MathOptInterface.Utilities.latex_formulation","text":"latex_formulation(model::MOI.ModelLike; kwargs...)\n\nWrap model in a type so that it can be pretty-printed as text/latex in a notebook like IJulia, or in Documenter.\n\nTo render the model, end the cell with latex_formulation(model), or call display(latex_formulation(model)) in to force the display of the model from inside a function.\n\nPossible keyword arguments are:\n\nsimplify_coefficients : Simplify coefficients if possible by omitting them or removing trailing zeros.\ndefault_name : The name given to variables with an empty name.\nprint_types : Print the MOI type of each function and set for clarity.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#Copy-utilities","page":"API Reference","title":"Copy utilities","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.default_copy_to\nUtilities.IndexMap\nUtilities.identity_index_map\nUtilities.ModelFilter\nUtilities.loadfromstring!","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.default_copy_to","page":"API Reference","title":"MathOptInterface.Utilities.default_copy_to","text":"default_copy_to(dest::MOI.ModelLike, src::MOI.ModelLike)\n\nA default implementation of MOI.copy_to(dest, src) for models that implement the incremental interface, that is, MOI.supports_incremental_interface returns true.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.IndexMap","page":"API Reference","title":"MathOptInterface.Utilities.IndexMap","text":"IndexMap()\n\nThe dictionary-like object returned by MOI.copy_to.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.identity_index_map","page":"API Reference","title":"MathOptInterface.Utilities.identity_index_map","text":"identity_index_map(model::MOI.ModelLike)\n\nReturn an IndexMap that maps all variable and constraint indices of model to themselves.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.ModelFilter","page":"API Reference","title":"MathOptInterface.Utilities.ModelFilter","text":"ModelFilter(filter::Function, model::MOI.ModelLike)\n\nA layer to filter out various components of model.\n\nThe filter function takes a single argument, which is each element from the list returned by the attributes below. It returns true if the element should be visible in the filtered model and false otherwise.\n\nThe components that are filtered are:\n\nEntire constraint types via:\nMOI.ListOfConstraintTypesPresent\nIndividual constraints via:\nMOI.ListOfConstraintIndices{F,S}\nSpecific attributes via:\nMOI.ListOfModelAttributesSet\nMOI.ListOfConstraintAttributesSet\nMOI.ListOfVariableAttributesSet\n\nwarning: Warning\nThe list of attributes filtered may change in a future release. 
You should write functions that are generic and not limited to the five types listed above. Thus, you should probably define a fallback filter(::Any) = true.\n\nSee below for examples of how this works.\n\nnote: Note\nThis layer has a limited scope. It is intended by be used in conjunction with MOI.copy_to.\n\nExample: copy model excluding integer constraints\n\nUse the do syntax to provide a single function.\n\nfiltered_src = MOI.Utilities.ModelFilter(src) do item\n return item != (MOI.VariableIndex, MOI.Integer)\nend\nMOI.copy_to(dest, filtered_src)\n\nExample: copy model excluding names\n\nUse type dispatch to simplify the implementation:\n\nmy_filter(::Any) = true # Note the generic fallback\nmy_filter(::MOI.VariableName) = false\nmy_filter(::MOI.ConstraintName) = false\nfiltered_src = MOI.Utilities.ModelFilter(my_filter, src)\nMOI.copy_to(dest, filtered_src)\n\nExample: copy irreducible infeasible subsystem\n\nmy_filter(::Any) = true # Note the generic fallback\nfunction my_filter(ci::MOI.ConstraintIndex)\n status = MOI.get(dest, MOI.ConstraintConflictStatus(), ci)\n return status != MOI.NOT_IN_CONFLICT\nend\nfiltered_src = MOI.Utilities.ModelFilter(my_filter, src)\nMOI.copy_to(dest, filtered_src)\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.loadfromstring!","page":"API Reference","title":"MathOptInterface.Utilities.loadfromstring!","text":"loadfromstring!(model, s)\n\nA utility function to aid writing tests.\n\nwarning: Warning\nThis function is not intended for widespread use. It is mainly used as a tool to simplify writing tests in MathOptInterface. Do not use it as an exchange format for storing or transmitting problem instances. Use the FileFormats submodule instead.\n\nExample\n\njulia> model = MOI.Utilities.Model{Float64}();\n\njulia> MOI.Utilities.loadfromstring!(model, \"\"\"\n variables: x, y, z\n constrainedvariable: [a, b, c] in Nonnegatives(3)\n minobjective::Float64: 2x + 3y\n con1: x + y <= 1.0\n con2: [x, y] in Nonnegatives(2)\n x >= 0.0\n \"\"\")\n\nNotes\n\nSpecial labels are:\n\nvariables\nminobjective\nmaxobjectives\n\nEverything else denotes a constraint with a name.\n\nAppend ::T to use an element type of T when parsing the function.\n\nDo not name VariableIndex constraints.\n\nExceptions\n\nx - y does NOT currently parse. Instead, write x + -1.0 * y.\nx^2 does NOT currently parse. Instead, write x * x.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#Penalty-relaxation","page":"API Reference","title":"Penalty relaxation","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.PenaltyRelaxation\nUtilities.ScalarPenaltyRelaxation","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.PenaltyRelaxation","page":"API Reference","title":"MathOptInterface.Utilities.PenaltyRelaxation","text":"PenaltyRelaxation(\n penalties = Dict{MOI.ConstraintIndex,Float64}();\n default::Union{Nothing,T} = 1.0,\n)\n\nA problem modifier that, when passed to MOI.modify, destructively modifies the model in-place to create a penalized relaxation of the constraints.\n\nwarning: Warning\nThis is a destructive routine that modifies the model in-place. 
If you don't want to modify the original model, use JuMP.copy_model to create a copy before calling MOI.modify.\n\nReformulation\n\nSee Utilities.ScalarPenaltyRelaxation for details of the reformulation.\n\nFor each constraint ci, the penalty passed to Utilities.ScalarPenaltyRelaxation is get(penalties, ci, default). If the value is nothing, because ci does not exist in penalties and default = nothing, then the constraint is skipped.\n\nReturn value\n\nMOI.modify(model, PenaltyRelaxation()) returns a Dict{MOI.ConstraintIndex,MOI.ScalarAffineFunction} that maps each constraint index to the corresponding y + z as a MOI.ScalarAffineFunction. In an optimal solution, query the value of these functions to compute the violation of each constraint.\n\nRelax a subset of constraints\n\nTo relax a subset of constraints, pass a penalties dictionary and set default = nothing.\n\nSupported constraint types\n\nThe penalty relaxation is currently limited to modifying MOI.ScalarAffineFunction and MOI.ScalarQuadraticFunction constraints in the linear sets MOI.LessThan, MOI.GreaterThan, MOI.EqualTo and MOI.Interval.\n\nIt does not include variable bound or integrality constraints, because these cannot be modified in-place.\n\nTo modify variable bounds, rewrite them as linear constraints.\n\nExample\n\njulia> model = MOI.Utilities.Model{Float64}();\n\njulia> x = MOI.add_variable(model);\n\njulia> c = MOI.add_constraint(model, 1.0 * x, MOI.LessThan(2.0));\n\njulia> map = MOI.modify(model, MOI.Utilities.PenaltyRelaxation(default = 2.0));\n\njulia> print(model)\nMinimize ScalarAffineFunction{Float64}:\n 0.0 + 2.0 v[2]\n\nSubject to:\n\nScalarAffineFunction{Float64}-in-LessThan{Float64}\n 0.0 + 1.0 v[1] - 1.0 v[2] <= 2.0\n\nVariableIndex-in-GreaterThan{Float64}\n v[2] >= 0.0\n\njulia> map[c] isa MOI.ScalarAffineFunction{Float64}\ntrue\n\njulia> model = MOI.Utilities.Model{Float64}();\n\njulia> x = MOI.add_variable(model);\n\njulia> c = MOI.add_constraint(model, 1.0 * x, MOI.LessThan(2.0));\n\njulia> map = MOI.modify(model, MOI.Utilities.PenaltyRelaxation(Dict(c => 3.0)));\n\njulia> print(model)\nMinimize ScalarAffineFunction{Float64}:\n 0.0 + 3.0 v[2]\n\nSubject to:\n\nScalarAffineFunction{Float64}-in-LessThan{Float64}\n 0.0 + 1.0 v[1] - 1.0 v[2] <= 2.0\n\nVariableIndex-in-GreaterThan{Float64}\n v[2] >= 0.0\n\njulia> map[c] isa MOI.ScalarAffineFunction{Float64}\ntrue\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.ScalarPenaltyRelaxation","page":"API Reference","title":"MathOptInterface.Utilities.ScalarPenaltyRelaxation","text":"ScalarPenaltyRelaxation(penalty::T) where {T}\n\nA problem modifier that, when passed to MOI.modify, destructively modifies the constraint in-place to create a penalized relaxation of the constraint.\n\nwarning: Warning\nThis is a destructive routine that modifies the constraint in-place. If you don't want to modify the original model, use JuMP.copy_model to create a copy before calling MOI.modify.\n\nReformulation\n\nThe penalty relaxation modifies constraints of the form f(x) ∈ S into f(x) + y - z ∈ S, where y, z ≥ 0, and then it introduces a penalty term into the objective of a × (y + z) (if minimizing, else -a), where a is penalty.\n\nWhen S is MOI.LessThan or MOI.GreaterThan, we omit y or z respectively as a performance optimization.\n\nReturn value\n\nMOI.modify(model, ci, ScalarPenaltyRelaxation(penalty)) returns y + z as a MOI.ScalarAffineFunction. 
In an optimal solution, query the value of this function to compute the violation of the constraint.\n\nExample\n\njulia> model = MOI.Utilities.Model{Float64}();\n\njulia> x = MOI.add_variable(model);\n\njulia> c = MOI.add_constraint(model, 1.0 * x, MOI.LessThan(2.0));\n\njulia> f = MOI.modify(model, c, MOI.Utilities.ScalarPenaltyRelaxation(2.0));\n\njulia> print(model)\nMinimize ScalarAffineFunction{Float64}:\n 0.0 + 2.0 v[2]\n\nSubject to:\n\nScalarAffineFunction{Float64}-in-LessThan{Float64}\n 0.0 + 1.0 v[1] - 1.0 v[2] <= 2.0\n\nVariableIndex-in-GreaterThan{Float64}\n v[2] >= 0.0\n\njulia> f isa MOI.ScalarAffineFunction{Float64}\ntrue\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MatrixOfConstraints","page":"API Reference","title":"MatrixOfConstraints","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.MatrixOfConstraints","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.MatrixOfConstraints","page":"API Reference","title":"MathOptInterface.Utilities.MatrixOfConstraints","text":"mutable struct MatrixOfConstraints{T,AT,BT,ST} <: MOI.ModelLike\n coefficients::AT\n constants::BT\n sets::ST\n caches::Vector{Any}\n are_indices_mapped::Vector{BitSet}\n final_touch::Bool\nend\n\nRepresent ScalarAffineFunction and VectorAffinefunction constraints in a matrix form where the linear coefficients of the functions are stored in the coefficients field, the constants of the functions or sets are stored in the constants field. Additional information about the sets are stored in the sets field.\n\nThis model can only be used as the constraints field of a MOI.Utilities.AbstractModel.\n\nWhen the constraints are added, they are stored in the caches field. They are only loaded in the coefficients and constants fields once MOI.Utilities.final_touch is called. For this reason, MatrixOfConstraints should not be used by an incremental interface. Use MOI.copy_to instead.\n\nThe constraints can be added in two different ways:\n\nWith add_constraint, in which case a canonicalized copy of the function is stored in caches.\nWith pass_nonvariable_constraints, in which case the functions and sets are stored themselves in caches without mapping the variable indices. The corresponding index in caches is added in are_indices_mapped. This avoids doing a copy of the function in case the getter of CanonicalConstraintFunction does not make a copy for the source model, for example, this is the case of VectorOfConstraints.\n\nWe illustrate this with an example. Suppose a model is copied from a src::MOI.Utilities.Model to a bridged model with a MatrixOfConstraints. For all the types that are not bridged, the constraints will be copied with pass_nonvariable_constraints. Hence the functions stored in caches are exactly the same as the ones stored in src. This is ok since this is only during the copy_to operation during which src cannot be modified. 
On the other hand, for the types that are bridged, the functions added may contain duplicates even if the functions did not contain duplicates in src, so duplicates are removed with MOI.Utilities.canonical.\n\nInterface\n\nThe .coefficients::AT type must implement:\n\nAT()\nMOI.empty!(::AT)\nMOI.Utilities.add_column\nMOI.Utilities.set_number_of_rows\nMOI.Utilities.allocate_terms\nMOI.Utilities.load_terms\nMOI.Utilities.final_touch\n\nThe .constants::BT type must implement:\n\nBT()\nBase.empty!(::BT)\nBase.resize!(::BT)\nMOI.Utilities.load_constants\nMOI.Utilities.function_constants\nMOI.Utilities.set_from_constants\n\nThe .sets::ST type must implement:\n\nST()\nMOI.is_empty(::ST)\nMOI.empty!(::ST)\nMOI.dimension(::ST)\nMOI.is_valid(::ST, ::MOI.ConstraintIndex)\nMOI.get(::ST, ::MOI.ListOfConstraintTypesPresent)\nMOI.get(::ST, ::MOI.NumberOfConstraints)\nMOI.get(::ST, ::MOI.ListOfConstraintIndices)\nMOI.Utilities.set_types\nMOI.Utilities.set_index\nMOI.Utilities.add_set\nMOI.Utilities.rows\nMOI.Utilities.final_touch\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#.coefficients","page":"API Reference","title":".coefficients","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.add_column\nUtilities.allocate_terms\nUtilities.set_number_of_rows\nUtilities.load_terms\nUtilities.final_touch\nUtilities.extract_function","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.add_column","page":"API Reference","title":"MathOptInterface.Utilities.add_column","text":"add_column(coefficients)::Nothing\n\nTell coefficients to pre-allocate data structures as needed to store one column.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.allocate_terms","page":"API Reference","title":"MathOptInterface.Utilities.allocate_terms","text":"allocate_terms(coefficients, index_map, func)::Nothing\n\nTell coefficients that the terms of the function func, in which the variable indices are mapped with index_map, will be loaded with load_terms.\n\nThe function func must be canonicalized before calling allocate_terms. See is_canonical.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.set_number_of_rows","page":"API Reference","title":"MathOptInterface.Utilities.set_number_of_rows","text":"set_number_of_rows(coefficients, n)::Nothing\n\nTell coefficients to pre-allocate data structures as needed to store n rows.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.load_terms","page":"API Reference","title":"MathOptInterface.Utilities.load_terms","text":"load_terms(coefficients, index_map, func, offset)::Nothing\n\nLoads the terms of func to coefficients, mapping the variable indices with index_map.\n\nThe ith dimension of func is loaded at the (offset + i)th row of coefficients.\n\nThe function must be allocated first with allocate_terms.\n\nThe function func must be canonicalized, see is_canonical.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.final_touch","page":"API Reference","title":"MathOptInterface.Utilities.final_touch","text":"final_touch(coefficients)::Nothing\n\nInforms the coefficients that all functions have been added with load_terms. No more modification is allowed unless MOI.empty! 
is called.\n\nfinal_touch(sets)::Nothing\n\nInforms the sets that all functions have been added with add_set. No more modification is allowed unless MOI.empty! is called.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.extract_function","page":"API Reference","title":"MathOptInterface.Utilities.extract_function","text":"extract_function(coefficients, row::Integer, constant::T) where {T}\n\nReturn the MOI.ScalarAffineFunction{T} function corresponding to row row in coefficients.\n\nextract_function(\n coefficients,\n rows::UnitRange,\n constants::Vector{T},\n) where {T}\n\nReturn the MOI.VectorAffineFunction{T} function corresponding to rows rows in coefficients.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.MutableSparseMatrixCSC\nUtilities.AbstractIndexing\nUtilities.ZeroBasedIndexing\nUtilities.OneBasedIndexing","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.MutableSparseMatrixCSC","page":"API Reference","title":"MathOptInterface.Utilities.MutableSparseMatrixCSC","text":"mutable struct MutableSparseMatrixCSC{Tv,Ti<:Integer,I<:AbstractIndexing}\n indexing::I\n m::Int\n n::Int\n colptr::Vector{Ti}\n rowval::Vector{Ti}\n nzval::Vector{Tv}\n nz_added::Vector{Ti}\nend\n\nMatrix type loading sparse matrices in the Compressed Sparse Column format. The indexing used is indexing, see AbstractIndexing. The other fields have the same meaning as for SparseArrays.SparseMatrixCSC except that the indexing is different unless indexing is OneBasedIndexing. In addition, nz_added is used to cache the number of non-zero terms that have been added to each column due to the incremental nature of load_terms.\n\nThe matrix is loaded in 5 steps:\n\nMOI.empty! is called.\nMOI.Utilities.add_column and MOI.Utilities.allocate_terms are called in any order.\nMOI.Utilities.set_number_of_rows is called.\nMOI.Utilities.load_terms is called for each affine function.\nMOI.Utilities.final_touch is called.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.AbstractIndexing","page":"API Reference","title":"MathOptInterface.Utilities.AbstractIndexing","text":"abstract type AbstractIndexing end\n\nIndexing to be used for storing the row and column indices of MutableSparseMatrixCSC. See ZeroBasedIndexing and OneBasedIndexing.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.ZeroBasedIndexing","page":"API Reference","title":"MathOptInterface.Utilities.ZeroBasedIndexing","text":"struct ZeroBasedIndexing <: AbstractIndexing end\n\nZero-based indexing: the ith row or column has index i - 1. This is useful when the vectors of row and column indices need to be communicated to a library using zero-based indexing such as C libraries.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.OneBasedIndexing","page":"API Reference","title":"MathOptInterface.Utilities.OneBasedIndexing","text":"struct OneBasedIndexing <: AbstractIndexing end\n\nOne-based indexing: the ith row or column has index i. 
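To see how MatrixOfConstraints, MutableSparseMatrixCSC, and a generated sets type fit together, here is a minimal sketch (the names ConicSets and ConicCache are illustrative, not part of MOI, and the field access at the end is an assumption about the standard cache layout): define a model whose constraints field is a MatrixOfConstraints and fill it with MOI.copy_to.

import MathOptInterface as MOI

MOI.Utilities.@product_of_sets(ConicSets, MOI.Zeros, MOI.Nonnegatives)

const ConicCache = MOI.Utilities.GenericModel{
    Float64,
    MOI.Utilities.ObjectiveContainer{Float64},
    MOI.Utilities.VariablesContainer{Float64},
    MOI.Utilities.MatrixOfConstraints{
        Float64,
        MOI.Utilities.MutableSparseMatrixCSC{Float64,Int,MOI.Utilities.OneBasedIndexing},
        Vector{Float64},
        ConicSets{Float64},
    },
}

src = MOI.Utilities.Model{Float64}()
x = MOI.add_variable(src)
f = MOI.VectorAffineFunction([MOI.VectorAffineTerm(1, MOI.ScalarAffineTerm(1.0, x))], [0.0])
MOI.add_constraint(src, f, MOI.Nonnegatives(1))

# MatrixOfConstraints is not incremental: it is populated during copy_to,
# after which the assembled CSC data lives in dest.constraints.coefficients.
dest = ConicCache()
index_map = MOI.copy_to(dest, src)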
This enables an allocation-free conversion of MutableSparseMatrixCSC to SparseArrays.SparseMatrixCSC.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#.constants","page":"API Reference","title":".constants","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.load_constants\nUtilities.function_constants\nUtilities.set_from_constants\nUtilities.modify_constants","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.load_constants","page":"API Reference","title":"MathOptInterface.Utilities.load_constants","text":"load_constants(constants, offset, func_or_set)::Nothing\n\nThis function loads the constants of func_or_set in constants at an offset of offset. Where offset is the sum of the dimensions of the constraints already loaded. The storage should be preallocated with resize! before calling this function.\n\nThis function should be implemented to be usable as storage of constants for MatrixOfConstraints.\n\nThe constants are loaded in three steps:\n\nBase.empty! is called.\nBase.resize! is called with the sum of the dimensions of all constraints.\nMOI.Utilities.load_constants is called for each function for vector constraint or set for scalar constraint.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.function_constants","page":"API Reference","title":"MathOptInterface.Utilities.function_constants","text":"function_constants(constants, rows)\n\nThis function returns the function constants that were loaded with load_constants at the rows rows.\n\nThis function should be implemented to be usable as storage of constants for MatrixOfConstraints.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.set_from_constants","page":"API Reference","title":"MathOptInterface.Utilities.set_from_constants","text":"set_from_constants(constants, S::Type, rows)::S\n\nThis function returns an instance of the set S for which the constants where loaded with load_constants at the rows rows.\n\nThis function should be implemented to be usable as storage of constants for MatrixOfConstraints.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.modify_constants","page":"API Reference","title":"MathOptInterface.Utilities.modify_constants","text":"modify_constants(constants, row::Integer, new_constant::T) where {T}\nmodify_constants(\n constants,\n rows::AbstractVector{<:Integer},\n new_constants::AbstractVector{T},\n) where {T}\n\nModify constants in-place to store new_constant in the row row, or rows rows.\n\nThis function must be implemented to enable MOI.ScalarConstantChange and MOI.VectorConstantChange for MatrixOfConstraints.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.Hyperrectangle","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.Hyperrectangle","page":"API Reference","title":"MathOptInterface.Utilities.Hyperrectangle","text":"struct Hyperrectangle{T} <: AbstractVectorBounds\n lower::Vector{T}\n upper::Vector{T}\nend\n\nA struct for the .constants field in MatrixOfConstraints.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#.sets","page":"API 
Reference","title":".sets","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.set_index\nUtilities.set_types\nUtilities.add_set\nUtilities.rows\nUtilities.num_rows\nUtilities.set_with_dimension","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.set_index","page":"API Reference","title":"MathOptInterface.Utilities.set_index","text":"set_index(sets, ::Type{S})::Union{Int,Nothing} where {S<:MOI.AbstractSet}\n\nReturn an integer corresponding to the index of the set type in the list given by set_types.\n\nIf S is not part of the list, return nothing.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.set_types","page":"API Reference","title":"MathOptInterface.Utilities.set_types","text":"set_types(sets)::Vector{Type}\n\nReturn the list of the types of the sets allowed in sets.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.add_set","page":"API Reference","title":"MathOptInterface.Utilities.add_set","text":"add_set(sets, i)::Int64\n\nAdd a scalar set of type index i.\n\nadd_set(sets, i, dim)::Int64\n\nAdd a vector set of type index i and dimension dim.\n\nBoth methods return a unique Int64 of the set that can be used to reference this set.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.rows","page":"API Reference","title":"MathOptInterface.Utilities.rows","text":"rows(sets, ci::MOI.ConstraintIndex)::Union{Int,UnitRange{Int}}\n\nReturn the rows in 1:MOI.dimension(sets) corresponding to the set of id ci.value.\n\nFor scalar sets, this returns an Int. For vector sets, this returns an UnitRange{Int}.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.num_rows","page":"API Reference","title":"MathOptInterface.Utilities.num_rows","text":"num_rows(sets::OrderedProductOfSets, ::Type{S}) where {S}\n\nReturn the number of rows corresponding to a set of type S. That is, it is the sum of the dimensions of the sets of type S.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.set_with_dimension","page":"API Reference","title":"MathOptInterface.Utilities.set_with_dimension","text":"set_with_dimension(::Type{S}, dim) where {S<:MOI.AbstractVectorSet}\n\nReturns the instance of S of MOI.dimension dim. 
This needs to be implemented for sets of type S to be useable with MatrixOfConstraints.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.ProductOfSets\nUtilities.MixOfScalarSets\nUtilities.@mix_of_scalar_sets\nUtilities.OrderedProductOfSets\nUtilities.@product_of_sets","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.ProductOfSets","page":"API Reference","title":"MathOptInterface.Utilities.ProductOfSets","text":"abstract type ProductOfSets{T} end\n\nRepresents a cartesian product of sets of given types.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.MixOfScalarSets","page":"API Reference","title":"MathOptInterface.Utilities.MixOfScalarSets","text":"abstract type MixOfScalarSets{T} <: ProductOfSets{T} end\n\nProduct of scalar sets in the order the constraints are added, mixing the constraints of different types.\n\nUse @mix_of_scalar_sets to generate a new subtype.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.@mix_of_scalar_sets","page":"API Reference","title":"MathOptInterface.Utilities.@mix_of_scalar_sets","text":"@mix_of_scalar_sets(name, set_types...)\n\nGenerate a new MixOfScalarSets subtype.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> MOI.Utilities.@mix_of_scalar_sets(\n MixedIntegerLinearProgramSets,\n MOI.GreaterThan{T},\n MOI.LessThan{T},\n MOI.EqualTo{T},\n MOI.Integer,\n )\n\n\n\n\n\n","category":"macro"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.OrderedProductOfSets","page":"API Reference","title":"MathOptInterface.Utilities.OrderedProductOfSets","text":"abstract type OrderedProductOfSets{T} <: ProductOfSets{T} end\n\nProduct of sets in the order the constraints are added, grouping the constraints of the same types contiguously.\n\nUse @product_of_sets to generate new subtypes.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.@product_of_sets","page":"API Reference","title":"MathOptInterface.Utilities.@product_of_sets","text":"@product_of_sets(name, set_types...)\n\nGenerate a new OrderedProductOfSets subtype.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> MOI.Utilities.@product_of_sets(\n LinearOrthants,\n MOI.Zeros,\n MOI.Nonnegatives,\n MOI.Nonpositives,\n MOI.ZeroOne,\n )\n\n\n\n\n\n","category":"macro"},{"location":"moi/submodules/Utilities/reference/#Fallbacks","page":"API Reference","title":"Fallbacks","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.get_fallback","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.get_fallback","page":"API Reference","title":"MathOptInterface.Utilities.get_fallback","text":"get_fallback(model::MOI.ModelLike, ::MOI.ObjectiveValue)\n\nCompute the objective function value using the VariablePrimal results and the ObjectiveFunction value.\n\n\n\n\n\nget_fallback(\n model::MOI.ModelLike,\n ::MOI.DualObjectiveValue,\n ::Type{T},\n)::T where {T}\n\nCompute the dual objective value of type T using the ConstraintDual results and the ConstraintFunction and ConstraintSet values.\n\nNote that the nonlinear part of the model is ignored.\n\n\n\n\n\nget_fallback(\n model::MOI.ModelLike,\n ::MOI.ConstraintPrimal,\n 
constraint_index::MOI.ConstraintIndex,\n)\n\nCompute the value of the function of the constraint of index constraint_index using the VariablePrimal results and the ConstraintFunction values.\n\n\n\n\n\nget_fallback(\n model::MOI.ModelLike,\n attr::MOI.ConstraintDual,\n ci::MOI.ConstraintIndex{Union{MOI.VariableIndex,MOI.VectorOfVariables}},\n ::Type{T} = Float64,\n) where {T}\n\nCompute the dual of the constraint of index ci using the ConstraintDual of other constraints and the ConstraintFunction values.\n\nThrows an error if some constraints are quadratic or if there is one another MOI.VariableIndex-in-S or MOI.VectorOfVariables-in-S constraint with one of the variables in the function of the constraint ci.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#Function-utilities","page":"API Reference","title":"Function utilities","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"The following utilities are available for functions:","category":"page"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.eval_variables\nUtilities.map_indices\nUtilities.substitute_variables\nUtilities.filter_variables\nUtilities.remove_variable\nUtilities.all_coefficients\nUtilities.unsafe_add\nUtilities.isapprox_zero\nUtilities.modify_function\nUtilities.zero_with_output_dimension","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.eval_variables","page":"API Reference","title":"MathOptInterface.Utilities.eval_variables","text":"eval_variables(value_fn::Function, f::MOI.AbstractFunction)\n\nReturns the value of function f if each variable index vi is evaluated as value_fn(vi).\n\nNote that value_fn must return a Number. See substitute_variables for a similar function where value_fn returns an MOI.AbstractScalarFunction.\n\nwarning: Warning\nThe two-argument version of eval_variables is deprecated and may be removed in MOI v2.0.0. Use the three-argument method eval_variables(::Function, ::MOI.ModelLike, ::MOI.AbstractFunction) instead.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.map_indices","page":"API Reference","title":"MathOptInterface.Utilities.map_indices","text":"map_indices(index_map::Function, attr::MOI.AnyAttribute, x::X)::X where {X}\n\nSubstitute any MOI.VariableIndex (resp. MOI.ConstraintIndex) in x by the MOI.VariableIndex (resp. MOI.ConstraintIndex) of the same type given by index_map(x).\n\nWhen to implement this method for new types X\n\nThis function is used by implementations of MOI.copy_to on constraint functions, attribute values and submittable values. If you define a new attribute whose values x::X contain variable or constraint indices, you must also implement this function.\n\n\n\n\n\nmap_indices(\n variable_map::AbstractDict{T,T},\n x::X,\n)::X where {T<:MOI.Index,X}\n\nShortcut for map_indices(vi -> variable_map[vi], x).\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.substitute_variables","page":"API Reference","title":"MathOptInterface.Utilities.substitute_variables","text":"substitute_variables(variable_map::Function, x)\n\nSubstitute any MOI.VariableIndex in x by variable_map(x). 
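As a brief sketch of the three-argument eval_variables mentioned above (the values dictionary and variable names are illustrative):

import MathOptInterface as MOI

model = MOI.Utilities.Model{Float64}()
x = MOI.add_variable(model)
y = MOI.add_variable(model)
f = 1.0 * x + 2.0 * y + 3.0

# Evaluate f at x = 1, y = 2; the result should be 1 + 4 + 3 = 8.
values = Dict(x => 1.0, y => 2.0)
MOI.Utilities.eval_variables(vi -> values[vi], model, f)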
The variable_map function returns either MOI.VariableIndex or MOI.ScalarAffineFunction, see eval_variables for a similar function where variable_map returns a number.\n\nThis function is used by bridge optimizers on constraint functions, attribute values and submittable values when at least one variable bridge is used hence it needs to be implemented for custom types that are meant to be used as attribute or submittable value.\n\nnote: Note\nWhen implementing a new method, don't use substitute_variables(::Function, because Julia will not specialize on it. Use instead substitute_variables(::F, ...) where {F<:Function}.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.filter_variables","page":"API Reference","title":"MathOptInterface.Utilities.filter_variables","text":"filter_variables(keep::Function, f::AbstractFunction)\n\nReturn a new function f with the variable vi such that !keep(vi) removed.\n\nWARNING: Don't define filter_variables(::Function, ...) because Julia will not specialize on this. Define instead filter_variables(::F, ...) where {F<:Function}.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.remove_variable","page":"API Reference","title":"MathOptInterface.Utilities.remove_variable","text":"remove_variable(f::AbstractFunction, vi::VariableIndex)\n\nReturn a new function f with the variable vi removed.\n\n\n\n\n\nremove_variable(\n f::MOI.AbstractFunction,\n s::MOI.AbstractSet,\n vi::MOI.VariableIndex,\n)\n\nReturn a tuple (g, t) representing the constraint f-in-s with the variable vi removed. That is, the terms containing the variable vi in the function f are removed and the dimension of the set s is updated if needed (for example, when f is a VectorOfVariables with vi being one of the variables).\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.all_coefficients","page":"API Reference","title":"MathOptInterface.Utilities.all_coefficients","text":"all_coefficients(p::Function, f::MOI.AbstractFunction)\n\nDetermine whether predicate p returns true for all coefficients of f, returning false as soon as the first coefficient of f for which p returns false is encountered (short-circuiting). Similar to all.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.unsafe_add","page":"API Reference","title":"MathOptInterface.Utilities.unsafe_add","text":"unsafe_add(t1::MOI.ScalarAffineTerm, t2::MOI.ScalarAffineTerm)\n\nSums the coefficients of t1 and t2 and returns an output MOI.ScalarAffineTerm. It is unsafe because it uses the variable of t1 as the variable of the output without checking that it is equal to that of t2.\n\n\n\n\n\nunsafe_add(t1::MOI.ScalarQuadraticTerm, t2::MOI.ScalarQuadraticTerm)\n\nSums the coefficients of t1 and t2 and returns an output MOI.ScalarQuadraticTerm. It is unsafe because it uses the variable's of t1 as the variable's of the output without checking that they are the same (up to permutation) to those of t2.\n\n\n\n\n\nunsafe_add(t1::MOI.VectorAffineTerm, t2::MOI.VectorAffineTerm)\n\nSums the coefficients of t1 and t2 and returns an output MOI.VectorAffineTerm. 
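For the scalar affine case of unsafe_add described above, a minimal sketch (both terms deliberately use the same variable, which is the caller's responsibility to guarantee):

import MathOptInterface as MOI

x = MOI.VariableIndex(1)
t1 = MOI.ScalarAffineTerm(1.0, x)
t2 = MOI.ScalarAffineTerm(2.0, x)

# Sums the coefficients without checking that the variables match: 3.0 * x here.
MOI.Utilities.unsafe_add(t1, t2)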
It is unsafe because it uses the output_index and variable of t1 as the output_index and variable of the output term without checking that they are equal to those of t2.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.isapprox_zero","page":"API Reference","title":"MathOptInterface.Utilities.isapprox_zero","text":"isapprox_zero(f::MOI.AbstractFunction, tol)\n\nReturn a Bool indicating whether the function f is approximately zero using tol as a tolerance.\n\nImportant note\n\nThis function assumes that f does not contain any duplicate terms, you might want to first call canonical if that is not guaranteed.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> x = MOI.VariableIndex(1)\nMOI.VariableIndex(1)\n\njulia> f = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([1, -1], [x, x]), 0)\n(0) + (1) MOI.VariableIndex(1) - (1) MOI.VariableIndex(1)\n\njulia> MOI.Utilities.isapprox_zero(f, 1e-8)\nfalse\n\njulia> MOI.Utilities.isapprox_zero(MOI.Utilities.canonical(f), 1e-8)\ntrue\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.modify_function","page":"API Reference","title":"MathOptInterface.Utilities.modify_function","text":"modify_function(f::AbstractFunction, change::AbstractFunctionModification)\n\nReturn a copy of the function f, modified according to change.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.zero_with_output_dimension","page":"API Reference","title":"MathOptInterface.Utilities.zero_with_output_dimension","text":"zero_with_output_dimension(::Type{T}, output_dimension::Integer) where {T}\n\nCreate an instance of type T with the output dimension output_dimension.\n\nThis is mostly useful in Bridges, when code needs to be agnostic to the type of vector-valued function that is passed in.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"The following functions can be used to canonicalize a function:","category":"page"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.is_canonical\nUtilities.canonical\nUtilities.canonicalize!","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.is_canonical","page":"API Reference","title":"MathOptInterface.Utilities.is_canonical","text":"is_canonical(f::Union{ScalarAffineFunction, VectorAffineFunction})\n\nReturns a Bool indicating whether the function is in canonical form. See canonical.\n\n\n\n\n\nis_canonical(f::Union{ScalarQuadraticFunction, VectorQuadraticFunction})\n\nReturns a Bool indicating whether the function is in canonical form. 
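A minimal sketch of is_canonical on an affine function with duplicate terms (the function f is illustrative):

import MathOptInterface as MOI

x = MOI.VariableIndex(1)

# Two terms for the same variable, so f is not in canonical form.
f = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([1.0, 2.0], [x, x]), 0.0)
MOI.Utilities.is_canonical(f)                           # false
MOI.Utilities.is_canonical(MOI.Utilities.canonical(f))  # true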
See canonical.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.canonical","page":"API Reference","title":"MathOptInterface.Utilities.canonical","text":"canonical(f::MOI.AbstractFunction)\n\nReturns the function in a canonical form, that is,\n\nEach term appears only once.\nThe coefficients are nonzero.\nThe terms appear in increasing order of variable, where the order of the variables is the order of their value.\nFor an AbstractVectorFunction, the terms are sorted in ascending order of output index.\n\nThe output of canonical can be assumed to be a copy of f, even for VectorOfVariables.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> x, y, z = MOI.VariableIndex.(1:3);\n\njulia> f = MOI.ScalarAffineFunction(\n MOI.ScalarAffineTerm.(Float64[2, 1, 3, -2, -3], [y, x, z, x, z]),\n 5.0,\n )\n5.0 + 2.0 MOI.VariableIndex(2) + 1.0 MOI.VariableIndex(1) + 3.0 MOI.VariableIndex(3) - 2.0 MOI.VariableIndex(1) - 3.0 MOI.VariableIndex(3)\n\njulia> MOI.Utilities.canonical(f)\n5.0 - 1.0 MOI.VariableIndex(1) + 2.0 MOI.VariableIndex(2)\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.canonicalize!","page":"API Reference","title":"MathOptInterface.Utilities.canonicalize!","text":"canonicalize!(f::Union{ScalarAffineFunction, VectorAffineFunction})\n\nConvert a function to canonical form in-place, without allocating a copy to hold the result. See canonical.\n\n\n\n\n\ncanonicalize!(f::Union{ScalarQuadraticFunction, VectorQuadraticFunction})\n\nConvert a function to canonical form in-place, without allocating a copy to hold the result. See canonical.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"The following functions can be used to manipulate functions with basic algebra:","category":"page"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.scalar_type\nUtilities.scalarize\nUtilities.eachscalar\nUtilities.promote_operation\nUtilities.operate\nUtilities.operate!\nUtilities.operate_output_index!\nUtilities.vectorize","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.scalar_type","page":"API Reference","title":"MathOptInterface.Utilities.scalar_type","text":"scalar_type(F::Type{<:MOI.AbstractVectorFunction})\n\nType of functions obtained by indexing objects obtained by calling eachscalar on functions of type F.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.scalarize","page":"API Reference","title":"MathOptInterface.Utilities.scalarize","text":"scalarize(func::MOI.VectorOfVariables, ignore_constants::Bool = false)\n\nReturns a vector of scalar functions making up the vector function in the form of a Vector{MOI.VariableIndex}.\n\nSee also eachscalar.\n\n\n\n\n\nscalarize(func::MOI.VectorAffineFunction{T}, ignore_constants::Bool = false)\n\nReturns a vector of scalar functions making up the vector function in the form of a Vector{MOI.ScalarAffineFunction{T}}.\n\nSee also eachscalar.\n\n\n\n\n\nscalarize(func::MOI.VectorQuadraticFunction{T}, ignore_constants::Bool = false)\n\nReturns a vector of scalar functions making up the vector function in the form of a Vector{MOI.ScalarQuadraticFunction{T}}.\n\nSee also 
eachscalar.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.eachscalar","page":"API Reference","title":"MathOptInterface.Utilities.eachscalar","text":"eachscalar(f::MOI.AbstractVectorFunction)\n\nReturns an iterator for the scalar components of the vector function.\n\nSee also scalarize.\n\n\n\n\n\neachscalar(f::MOI.AbstractVector)\n\nReturns an iterator for the scalar components of the vector.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.promote_operation","page":"API Reference","title":"MathOptInterface.Utilities.promote_operation","text":"promote_operation(\n op::Function,\n ::Type{T},\n ArgsTypes::Type{<:Union{T,AbstractVector{T},MOI.AbstractFunction}}...,\n) where {T<:Number}\n\nCompute the return type of the call operate(op, T, args...), where the types of the arguments args are ArgsTypes.\n\nOne assumption is that the element type T is invariant under each operation. That is, op(::T, ::T)::T where op is a +, -, *, and /.\n\nThere are six methods for which we implement Utilities.promote_operation:\n\n+ a. promote_operation(::typeof(+), ::Type{T}, ::Type{F1}, ::Type{F2})\n- a. promote_operation(::typeof(-), ::Type{T}, ::Type{F}) b. promote_operation(::typeof(-), ::Type{T}, ::Type{F1}, ::Type{F2})\n* a. promote_operation(::typeof(*), ::Type{T}, ::Type{T}, ::Type{F}) b. promote_operation(::typeof(*), ::Type{T}, ::Type{F}, ::Type{T}) c. promote_operation(::typeof(*), ::Type{T}, ::Type{F1}, ::Type{F2}) where F1 and F2 are VariableIndex or ScalarAffineFunction d. promote_operation(::typeof(*), ::Type{T}, ::Type{<:Diagonal{T}}, ::Type{F}\n/ a. promote_operation(::typeof(/), ::Type{T}, ::Type{F}, ::Type{T})\nvcat a. promote_operation(::typeof(vcat), ::Type{T}, ::Type{F}...)\nimag a. promote_operation(::typeof(imag), ::Type{T}, ::Type{F}) where F is VariableIndex or VectorOfVariables\n\nIn each case, F (or F1 and F2) is one of the ten supported types, with a restriction that the mathematical operation makes sense, for example, we don't define promote_operation(-, T, F1, F2) where F1 is a scalar-valued function and F2 is a vector-valued function. The ten supported types are:\n\n::T\n::VariableIndex\n::ScalarAffineFunction{T}\n::ScalarQuadraticFunction{T}\n::ScalarNonlinearFunction\n::AbstractVector{T}\n::VectorOfVariables\n::VectorAffineFunction{T}\n::VectorQuadraticFunction{T}\n::VectorNonlinearFunction\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.operate","page":"API Reference","title":"MathOptInterface.Utilities.operate","text":"operate(\n op::Function,\n ::Type{T},\n args::Union{T,MOI.AbstractFunction}...,\n)::MOI.AbstractFunction where {T<:Number}\n\nReturns an MOI.AbstractFunction representing the function resulting from the operation op(args...) on functions of coefficient type T.\n\nNo argument can be modified.\n\nMethods\n\n+ a. operate(::typeof(+), ::Type{T}, ::F1) b. operate(::typeof(+), ::Type{T}, ::F1, ::F2) c. operate(::typeof(+), ::Type{T}, ::F1...)\n- a. operate(::typeof(-), ::Type{T}, ::F) b. operate(::typeof(-), ::Type{T}, ::F1, ::F2)\n* a. operate(::typeof(*), ::Type{T}, ::T, ::F) b. operate(::typeof(*), ::Type{T}, ::F, ::T) c. operate(::typeof(*), ::Type{T}, ::F1, ::F2) where F1 and F2 are VariableIndex or ScalarAffineFunction d. operate(::typeof(*), ::Type{T}, ::Diagonal{T}, ::F)\n/ a. operate(::typeof(/), ::Type{T}, ::F, ::T)\nvcat a. 
operate(::typeof(vcat), ::Type{T}, ::F...)\nimag a. operate(::typeof(imag), ::Type{T}, ::F) where F is VariableIndex or VectorOfVariables\n\nOne assumption is that the element type T is invariant under each operation. That is, op(::T, ::T)::T where op is a +, -, *, and /.\n\nIn each case, F (or F1 and F2) is one of the ten supported types, with a restriction that the mathematical operation makes sense, for example, we don't define promote_operation(-, T, F1, F2) where F1 is a scalar-valued function and F2 is a vector-valued function. The ten supported types are:\n\n::T\n::VariableIndex\n::ScalarAffineFunction{T}\n::ScalarQuadraticFunction{T}\n::ScalarNonlinearFunction\n::AbstractVector{T}\n::VectorOfVariables\n::VectorAffineFunction{T}\n::VectorQuadraticFunction{T}\n::VectorNonlinearFunction\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.operate!","page":"API Reference","title":"MathOptInterface.Utilities.operate!","text":"operate!(\n op::Function,\n ::Type{T},\n args::Union{T,MOI.AbstractFunction}...,\n)::MOI.AbstractFunction where {T<:Number}\n\nReturns an MOI.AbstractFunction representing the function resulting from the operation op(args...) on functions of coefficient type T.\n\nThe first argument may be modified, in which case the return value is identical to the first argument. For operations which cannot be implemented in-place, this function returns a new object.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.operate_output_index!","page":"API Reference","title":"MathOptInterface.Utilities.operate_output_index!","text":"operate_output_index!(\n op::Union{typeof(+),typeof(-)},\n ::Type{T},\n output_index::Integer,\n f::Union{AbstractVector{T},MOI.AbstractVectorFunction}\n g::Union{T,MOI.AbstractScalarFunction}...\n) where {T<:Number}\n\nReturn an MOI.AbstractVectorFunction in which the scalar function in row output_index is the result of op(f[output_index], g).\n\nThe functions at output index different to output_index are the same as the functions at the same output index in func. The first argument may be modified.\n\nMethods\n\n+ a. operate_output_index!(+, ::Type{T}, ::Int, ::VectorF, ::ScalarF)\n- a. 
operate_output_index!(-, ::Type{T}, ::Int, ::VectorF, ::ScalarF)\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.vectorize","page":"API Reference","title":"MathOptInterface.Utilities.vectorize","text":"vectorize(x::AbstractVector{<:Number})\n\nReturns x.\n\n\n\n\n\nvectorize(x::AbstractVector{MOI.VariableIndex})\n\nReturns the vector of scalar affine functions in the form of a MOI.VectorAffineFunction{T}.\n\n\n\n\n\nvectorize(funcs::AbstractVector{MOI.ScalarAffineFunction{T}}) where T\n\nReturns the vector of scalar affine functions in the form of a MOI.VectorAffineFunction{T}.\n\n\n\n\n\nvectorize(funcs::AbstractVector{MOI.ScalarQuadraticFunction{T}}) where T\n\nReturns the vector of scalar quadratic functions in the form of a MOI.VectorQuadraticFunction{T}.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#Constraint-utilities","page":"API Reference","title":"Constraint utilities","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"The following utilities are available for moving the function constant to the set for scalar constraints:","category":"page"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.shift_constant\nUtilities.supports_shift_constant\nUtilities.normalize_and_add_constraint\nUtilities.normalize_constant","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.shift_constant","page":"API Reference","title":"MathOptInterface.Utilities.shift_constant","text":"shift_constant(set::MOI.AbstractScalarSet, offset)\n\nReturns a new scalar set new_set such that func-in-set is equivalent to func + offset-in-new_set.\n\nUse supports_shift_constant to check if the set supports shifting:\n\nif MOI.Utilities.supports_shift_constant(typeof(set))\n new_set = MOI.Utilities.shift_constant(set, -func.constant)\n func.constant = 0\n MOI.add_constraint(model, func, new_set)\nelse\n MOI.add_constraint(model, func, set)\nend\n\nNote for developers\n\nOnly define this function if it makes sense and you have implemented supports_shift_constant to return true.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> set = MOI.Interval(-2.0, 3.0)\nMathOptInterface.Interval{Float64}(-2.0, 3.0)\n\njulia> MOI.Utilities.supports_shift_constant(typeof(set))\ntrue\n\njulia> MOI.Utilities.shift_constant(set, 1.0)\nMathOptInterface.Interval{Float64}(-1.0, 4.0)\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.supports_shift_constant","page":"API Reference","title":"MathOptInterface.Utilities.supports_shift_constant","text":"supports_shift_constant(::Type{S}) where {S<:MOI.AbstractSet}\n\nReturn true if shift_constant is defined for set S.\n\nSee also shift_constant.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> MOI.Utilities.supports_shift_constant(MOI.Interval{Float64})\ntrue\n\njulia> MOI.Utilities.supports_shift_constant(MOI.ZeroOne)\nfalse\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.normalize_and_add_constraint","page":"API Reference","title":"MathOptInterface.Utilities.normalize_and_add_constraint","text":"normalize_and_add_constraint(\n model::MOI.ModelLike,\n func::MOI.AbstractScalarFunction,\n set::MOI.AbstractScalarSet;\n allow_modify_function::Bool = 
false,\n)\n\nAdds the scalar constraint obtained by moving the constant term in func to the set in model. If allow_modify_function is true then the function func can be modified.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.normalize_constant","page":"API Reference","title":"MathOptInterface.Utilities.normalize_constant","text":"normalize_constant(\n func::MOI.AbstractScalarFunction,\n set::MOI.AbstractScalarSet;\n allow_modify_function::Bool = false,\n)\n\nReturn the func-in-set constraint in normalized form. That is, if func is MOI.ScalarQuadraticFunction or MOI.ScalarAffineFunction, the constant is moved to the set. If allow_modify_function is true then the function func can be modified.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"The following utility identifies those constraints imposing bounds on a given variable, and returns those bound values:","category":"page"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.get_bounds","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.get_bounds","page":"API Reference","title":"MathOptInterface.Utilities.get_bounds","text":"get_bounds(model::MOI.ModelLike, ::Type{T}, x::MOI.VariableIndex)\n\nReturn a tuple (lb, ub) of type Tuple{T, T}, where lb and ub are lower and upper bounds, respectively, imposed on x in model.\n\n\n\n\n\nget_bounds(\n model::MOI.ModelLike,\n bounds_cache::Dict{MOI.VariableIndex,NTuple{2,T}},\n f::MOI.ScalarAffineFunction{T},\n) where {T} --> Union{Nothing,NTuple{2,T}}\n\nReturn the lower and upper bound of f as a tuple. If the domain is not bounded, return nothing.\n\n\n\n\n\nget_bounds(\n model::MOI.ModelLike,\n bounds_cache::Dict{MOI.VariableIndex,NTuple{2,T}},\n x::MOI.VariableIndex,\n) where {T} --> Union{Nothing,NTuple{2,T}}\n\nReturn the lower and upper bound of x as a tuple. 
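As a minimal sketch of the three-argument get_bounds(model, T, x) described above (the bound values are illustrative):

import MathOptInterface as MOI

model = MOI.Utilities.Model{Float64}()
x = MOI.add_variable(model)
MOI.add_constraint(model, x, MOI.GreaterThan(1.0))
MOI.add_constraint(model, x, MOI.LessThan(2.0))

# Collects the bounds imposed on x by its VariableIndex constraints: (1.0, 2.0).
MOI.Utilities.get_bounds(model, Float64, x)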
If the domain is not bounded, return nothing.\n\nSimilar to get_bounds(::MOI.ModelLike, ::Type{T}, ::MOI.VariableIndex), except that the second argument is a cache which maps variables to their bounds and avoids repeated lookups.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"The following utilities are useful when working with symmetric matrix cones.","category":"page"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.is_diagonal_vectorized_index\nUtilities.side_dimension_for_vectorized_dimension","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.is_diagonal_vectorized_index","page":"API Reference","title":"MathOptInterface.Utilities.is_diagonal_vectorized_index","text":"is_diagonal_vectorized_index(index::Base.Integer)\n\nReturn whether index is the index of a diagonal element in a MOI.AbstractSymmetricMatrixSetTriangle set.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.side_dimension_for_vectorized_dimension","page":"API Reference","title":"MathOptInterface.Utilities.side_dimension_for_vectorized_dimension","text":"side_dimension_for_vectorized_dimension(n::Integer)\n\nReturn the dimension d such that MOI.dimension(MOI.PositiveSemidefiniteConeTriangle(d)) is n.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#Set-utilities","page":"API Reference","title":"Set utilities","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"The following utilities are available for sets:","category":"page"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.AbstractDistance\nUtilities.ProjectionUpperBoundDistance\nUtilities.distance_to_set\nUtilities.set_dot","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.AbstractDistance","page":"API Reference","title":"MathOptInterface.Utilities.AbstractDistance","text":"abstract type AbstractDistance end\n\nAn abstract type used to enable dispatch of Utilities.distance_to_set.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.ProjectionUpperBoundDistance","page":"API Reference","title":"MathOptInterface.Utilities.ProjectionUpperBoundDistance","text":"ProjectionUpperBoundDistance() <: AbstractDistance\n\nAn upper bound on the minimum distance between point and the closest feasible point in set.\n\nDefinition of distance\n\nThe minimum distance is computed as:\n\nd(x, 𝒦) = min_{y ∈ 𝒦} ‖x - y‖\n\nwhere x is point and 𝒦 is set. 
The norm is computed as:\n\nx = sqrtf(x x mathcalK)\n\nwhere f is Utilities.set_dot.\n\nIn the default case, where the set does not have a specialized method for Utilities.set_dot, the norm is equivalent to the Euclidean norm x = sqrtsum x_i^2.\n\nWhy an upper bound?\n\nIn most cases, distance_to_set should return the smallest upper bound, but it may return a larger value if the smallest upper bound is expensive to compute.\n\nFor example, given an epigraph form of a conic set, (t x) f(x) le t, it may be simpler to return delta such that f(x) le t + delta, rather than computing the nearest projection onto the set.\n\nIf the distance is not the smallest upper bound, the docstring of the appropriate distance_to_set method must describe the way that the distance is computed.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.distance_to_set","page":"API Reference","title":"MathOptInterface.Utilities.distance_to_set","text":"distance_to_set(\n [d::AbstractDistance = ProjectionUpperBoundDistance(),]\n point::T,\n set::MOI.AbstractScalarSet,\n) where {T}\n\ndistance_to_set(\n [d::AbstractDistance = ProjectionUpperBoundDistance(),]\n point::AbstractVector{T},\n set::MOI.AbstractVectorSet,\n) where {T}\n\nCompute the distance between point and set using the distance metric d. If point is in the set set, this function must return zero(T).\n\nIf d is omitted, the default distance is Utilities.ProjectionUpperBoundDistance.\n\n\n\n\n\ndistance_to_set(::ProjectionUpperBoundDistance, x, ::MOI.RotatedSecondOrderCone)\n\nLet (t, u, y...) = x. Return the 2-norm of the vector d such that in x + d, u is projected to 1 if u <= 0, and t is increased such that x + d belongs to the set.\n\n\n\n\n\ndistance_to_set(::ProjectionUpperBoundDistance, x, ::MOI.ExponentialCone)\n\nLet (u, v, w) = x. If v > 0, return the epigraph distance d such that (u, v, w + d) belongs to the set.\n\nIf v <= 0 return the 2-norm of the vector d such that x + d = (u, 1, z) where z satisfies the constraints.\n\n\n\n\n\ndistance_to_set(::ProjectionUpperBoundDistance, x, ::MOI.DualExponentialCone)\n\nLet (u, v, w) = x. If u < 0, return the epigraph distance d such that (u, v, w + d) belongs to the set.\n\nIf u >= 0 return the 2-norm of the vector d such that x + d = (u, -1, z) where z satisfies the constraints.\n\n\n\n\n\ndistance_to_set(::ProjectionUpperBoundDistance, x, ::MOI.GeometricMeanCone)\n\nLet (t, y...) = x. If all y are non-negative, return the epigraph distance d such that (t + d, y...) belongs to the set.\n\nIf any y are strictly negative, return the 2-norm of the vector d that projects negative y elements to 0 and t to ℝ₋.\n\n\n\n\n\ndistance_to_set(::ProjectionUpperBoundDistance, x, ::MOI.PowerCone)\n\nLet (a, b, c) = x. If a and b are non-negative, return the epigraph distance required to increase c such that the constraint is satisfied.\n\nIf a or b is strictly negative, return the 2-norm of the vector d such that in the vector x + d: c, and any negative a and b are projected to 0.\n\n\n\n\n\ndistance_to_set(::ProjectionUpperBoundDistance, x, ::MOI.DualPowerCone)\n\nLet (a, b, c) = x. If a and b are non-negative, return the epigraph distance required to increase c such that the constraint is satisfied.\n\nIf a or b is strictly negative, return the 2-norm of the vector d such that in the vector x + d: c, and any negative a and b are projected to 0.\n\n\n\n\n\ndistance_to_set(::ProjectionUpperBoundDistance, x, ::MOI.NormOneCone)\n\nLet (t, y...) = x. 
Return the epigraph distance d such that (t + d, y...) belongs to the set.\n\n\n\n\n\ndistance_to_set(::ProjectionUpperBoundDistance, x, ::MOI.NormInfinityCone)\n\nLet (t, y...) = x. Return the epigraph distance d such that (t + d, y...) belongs to the set.\n\n\n\n\n\ndistance_to_set(::ProjectionUpperBoundDistance, x, ::MOI.RelativeEntropyCone)\n\nLet (u, v..., w...) = x. If v and w are strictly positive, return the epigraph distance required to increase u such that the constraint is satisfied.\n\nIf any elements in v or w are non-positive, return the 2-norm of the vector d such that in the vector x + d: any non-positive elements in v and w are projected to 1, and u is projected such that the epigraph constraint holds.\n\n\n\n\n\ndistance_to_set(::ProjectionUpperBoundDistance, x, set::MOI.NormCone)\n\nLet (t, y...) = x. Return the epigraph distance d such that (t + d, y...) belongs to the set.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.set_dot","page":"API Reference","title":"MathOptInterface.Utilities.set_dot","text":"set_dot(x::AbstractVector, y::AbstractVector, set::AbstractVectorSet)\n\nReturn the scalar product between a vector x of the set set and a vector y of the dual of that set.\n\n\n\n\n\nset_dot(x, y, set::AbstractScalarSet)\n\nReturn the scalar product between a number x of the set set and a number y of the dual of that set.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#DoubleDicts","page":"API Reference","title":"DoubleDicts","text":"","category":"section"},{"location":"moi/submodules/Utilities/reference/","page":"API Reference","title":"API Reference","text":"Utilities.DoubleDicts.DoubleDict\nUtilities.DoubleDicts.DoubleDictInner\nUtilities.DoubleDicts.IndexDoubleDict\nUtilities.DoubleDicts.IndexDoubleDictInner\nUtilities.DoubleDicts.outer_keys\nUtilities.DoubleDicts.nonempty_outer_keys","category":"page"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.DoubleDicts.DoubleDict","page":"API Reference","title":"MathOptInterface.Utilities.DoubleDicts.DoubleDict","text":"DoubleDict{V}\n\nAn optimized dictionary to map MOI.ConstraintIndex to values of type V.\n\nWorks as an AbstractDict{MOI.ConstraintIndex,V} with minimal differences.\n\nIf V is also a MOI.ConstraintIndex, use IndexDoubleDict.\n\nNote that MOI.ConstraintIndex is not a concrete type, as opposed to MOI.ConstraintIndex{MOI.VariableIndex, MOI.Integers}, which is a concrete type.\n\nWhen looping through multiple keys of the same Function-in-Set type, use\n\ninner = dict[F, S]\n\nto return a type-stable DoubleDictInner.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.DoubleDicts.DoubleDictInner","page":"API Reference","title":"MathOptInterface.Utilities.DoubleDicts.DoubleDictInner","text":"DoubleDictInner{F,S,V}\n\nA type stable inner dictionary of DoubleDict.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.DoubleDicts.IndexDoubleDict","page":"API Reference","title":"MathOptInterface.Utilities.DoubleDicts.IndexDoubleDict","text":"IndexDoubleDict\n\nA specialized version of DoubleDict in which the values are of type MOI.ConstraintIndex.\n\nWhen looping through multiple keys of the same Function-in-Set type, use\n\ninner = dict[F, S]\n\nto return a type-stable 
IndexDoubleDictInner.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.DoubleDicts.IndexDoubleDictInner","page":"API Reference","title":"MathOptInterface.Utilities.DoubleDicts.IndexDoubleDictInner","text":"IndexDoubleDictInner{F,S}\n\nA type stable inner dictionary of IndexDoubleDict.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.DoubleDicts.outer_keys","page":"API Reference","title":"MathOptInterface.Utilities.DoubleDicts.outer_keys","text":"outer_keys(d::AbstractDoubleDict)\n\nReturn an iterator over the outer keys of the AbstractDoubleDict d. Each outer key is a Tuple{Type,Type} so that a double loop can be easily used:\n\nfor (F, S) in DoubleDicts.outer_keys(dict)\n for (k, v) in dict[F, S]\n # ...\n end\nend\n\nFor performance, it is recommended that the inner loop lies in a separate function to guarantee type-stability. Some outer keys (F, S) might lead to an empty dict[F, S]. If you want only nonempty dict[F, S], use nonempty_outer_keys.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Utilities/reference/#MathOptInterface.Utilities.DoubleDicts.nonempty_outer_keys","page":"API Reference","title":"MathOptInterface.Utilities.DoubleDicts.nonempty_outer_keys","text":"nonempty_outer_keys(d::AbstractDoubleDict)\n\nReturn a vector of outer keys of the AbstractDoubleDict d.\n\nOnly outer keys that have a nonempty set of inner keys will be returned.\n\nEach outer key is a Tuple{Type,Type} so that a double loop can be easily used\n\nfor (F, S) in DoubleDicts.nonempty_outer_keys(dict)\n for (k, v) in dict[F, S]\n # ...\n end\nend\nFor performance, it is recommended that the inner loop lies in a separate\nfunction to guarantee type-stability.\n\nIf you want an iterator of all current outer keys, use [`outer_keys`](@ref).\n\n\n\n\n\n","category":"function"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"EditURL = \"https://github.com/jump-dev/Clp.jl/blob/v1.1.0/README.md\"","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"(Image: )","category":"page"},{"location":"packages/Clp/#Clp.jl","page":"jump-dev/Clp.jl","title":"Clp.jl","text":"","category":"section"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"(Image: Build Status) (Image: codecov)","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"Clp.jl is a wrapper for the COIN-OR Linear Programming solver.","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"The wrapper has two components:","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"a thin wrapper around the complete C API\nan interface to MathOptInterface","category":"page"},{"location":"packages/Clp/#Affiliation","page":"jump-dev/Clp.jl","title":"Affiliation","text":"","category":"section"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"This wrapper is maintained by the JuMP community and is not a COIN-OR project.","category":"page"},{"location":"packages/Clp/#Getting-help","page":"jump-dev/Clp.jl","title":"Getting help","text":"","category":"section"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"If you need help, please ask a question on the JuMP community 
forum.","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"If you have a reproducible example of a bug, please open a GitHub issue.","category":"page"},{"location":"packages/Clp/#License","page":"jump-dev/Clp.jl","title":"License","text":"","category":"section"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"Clp.jl is licensed under the MIT License.","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"The underlying solver, coin-or/Clp, is licensed under the Eclipse public license.","category":"page"},{"location":"packages/Clp/#Installation","page":"jump-dev/Clp.jl","title":"Installation","text":"","category":"section"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"Install Clp using Pkg.add:","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"import Pkg\nPkg.add(\"Clp\")","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"In addition to installing the Clp.jl package, this will also download and install the Clp binaries. You do not need to install Clp separately.","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"To use a custom binary, read the Custom solver binaries section of the JuMP documentation.","category":"page"},{"location":"packages/Clp/#Use-with-JuMP","page":"jump-dev/Clp.jl","title":"Use with JuMP","text":"","category":"section"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"To use Clp with JuMP, use Clp.Optimizer:","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"using JuMP, Clp\nmodel = Model(Clp.Optimizer)\nset_attribute(model, \"LogLevel\", 1)\nset_attribute(model, \"Algorithm\", 4)","category":"page"},{"location":"packages/Clp/#MathOptInterface-API","page":"jump-dev/Clp.jl","title":"MathOptInterface API","text":"","category":"section"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"The Clp optimizer supports the following constraints and attributes.","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"List of supported objective functions:","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"List of supported variable types:","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"MOI.Reals","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"List of supported constraint types:","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"MOI.ScalarAffineFunction{Float64} in MOI.EqualTo{Float64}\nMOI.ScalarAffineFunction{Float64} in MOI.GreaterThan{Float64}\nMOI.ScalarAffineFunction{Float64} in MOI.Interval{Float64}\nMOI.ScalarAffineFunction{Float64} in MOI.LessThan{Float64}\nMOI.VariableIndex in MOI.EqualTo{Float64}\nMOI.VariableIndex in MOI.GreaterThan{Float64}\nMOI.VariableIndex in MOI.Interval{Float64}\nMOI.VariableIndex in 
MOI.LessThan{Float64}","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"List of supported model attributes:","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"MOI.ObjectiveSense()","category":"page"},{"location":"packages/Clp/#Options","page":"jump-dev/Clp.jl","title":"Options","text":"","category":"section"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"Options are, unfortunately, not well documented.","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"The following options are likely to be the most useful:","category":"page"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"Parameter Example Explanation\nPrimalTolerance 1e-7 Primal feasibility tolerance\nDualTolerance 1e-7 Dual feasibility tolerance\nDualObjectiveLimit 1e308 When using dual simplex (where the objective is monotonically changing), terminate when the objective exceeds this limit\nMaximumIterations 2147483647 Terminate after performing this number of simplex iterations\nMaximumSeconds -1.0 Terminate after this many seconds have passed. A negative value means no time limit\nLogLevel 1 Set to 1, 2, 3, or 4 for increasing output. Set to 0 to disable output\nPresolveType 0 Set to 1 to disable presolve\nSolveType 5 Solution method: dual simplex (0), primal simplex (1), sprint (2), barrier with crossover (3), barrier without crossover (4), automatic (5)\nInfeasibleReturn 0 Set to 1 to return as soon as the problem is found to be infeasible (by default, an infeasibility proof is computed as well)\nScaling 3 0 -off, 1 equilibrium, 2 geometric, 3 auto, 4 dynamic(later)\nPerturbation 100 switch on perturbation (50), automatic (100), don't try perturbing (102)","category":"page"},{"location":"packages/Clp/#C-API","page":"jump-dev/Clp.jl","title":"C API","text":"","category":"section"},{"location":"packages/Clp/","page":"jump-dev/Clp.jl","title":"jump-dev/Clp.jl","text":"The C API can be accessed via Clp.Clp_XXX functions, where the names and arguments are identical to the C API.","category":"page"},{"location":"background/bibliography/#Bibliography","page":"Bibliography","title":"Bibliography","text":"","category":"section"},{"location":"background/bibliography/","page":"Bibliography","title":"Bibliography","text":"Barvinok, A. (2002). A course in convexity. Vol. 54 of Graduate studies in mathematics (American Mathematical Society).\n\n\n\nBen-Tal, A. and Nemirovski, A. (2001). Lectures on Modern Convex Optimization (Society for Industrial and Applied Mathematics).\n\n\n\nBertsimas, D.; Gupta, V. and Kallus, N. (2018). Data-driven robust optimization. Mathematical Programming 167, 235–292.\n\n\n\nBetts, J. T. (2010). Practical Methods for Optimal Control and Estimation Using Nonlinear Programming. Second Edition (Society for Industrial and Applied Mathematics).\n\n\n\nBoyd, S. and Vandenberghe, L. (2004). Convex Optimization (Cambridge University Press, Cambridge).\n\n\n\nBukhsh, W. A.; Grothey, A.; McKinnon, K. I. and Trodden, P. A. (2013). Local Solutions of the Optimal Power Flow Problem. IEEE Transactions on Power Systems 28, 4780–4788.\n\n\n\nCornuéjols, G.; Peña, J. and Tütüncü, R. (2018). Optimization Methods in Finance. 2 Edition (Cambridge University Press).\n\n\n\nD’Aertrycke, G.; Ehrenmann, A.; Ralph, D. and Smeers, Y. (2017). 
Risk trading in capacity equilibrium models (Cambridge Working Papers in Economics (CWPE)).\n\n\n\nFerris, M. C.; Mangasarian, O. L. and Wright, S. J. (2007). Linear Programming with MATLAB (Society for Industrial and Applied Mathematics).\n\n\n\nGoemans, M. X. and Williamson, D. P. (1995). Improved Approximation Algorithms for Maximum Cut and Satisfiability Problems Using Semidefinite Programming. J. ACM 42, 1115–1145.\n\n\n\nJabr, R. A. (2012). Exploiting Sparsity in SDP Relaxations of the OPF Problem. IEEE Transactions on Power Systems 27, 1138–1139.\n\n\n\nKnuth, D. E. (1994). The sandwich theorem. The Electronic Journal of Combinatorics 1.\n\n\n\nKrasko, V. and Rebennack, S. (2017). Global Optimization: Optimal Power Flow Problem. In: Advances and Trends in Optimization with Engineering Applications, edited by Terlaky, T.; Anjos, M. F. and Ahmed, S. (Society for Industrial and Applied Mathematics, Philadelphia, PA); Chapter 15, pp. 187–205.\n\n\n\nLinial, N. (2002). Finite Metric Spaces: Combinatorics, Geometry and Algorithms. In: Proceedings of the Eighteenth Annual Symposium on Computational Geometry, SCG '02 (Association for Computing Machinery, New York, NY, USA); p. 63.\n\n\n\nMatoušek, J. (2013). Lectures on discrete geometry. Vol. 212 no. 1 of Graduate Texts in Mathematics (Springer Science & Business Media).\n\n\n\nPeng, J. and Wei, Y. (2007). Approximating K‐means‐type Clustering via Semidefinite Programming. SIAM Journal on Optimization 18, 186–205.\n\n\n\nZimmerman, R. D.; Murillo-Sánchez, C. E. and Thomas, R. J. (2011). MATPOWER: Steady-State Operations, Planning, and Analysis Tools for Power Systems Research and Education. IEEE Transactions on Power Systems 26, 12–19.\n\n\n\n","category":"page"},{"location":"moi/","page":"Introduction","title":"Introduction","text":"EditURL = \"https://github.com/jump-dev/MathOptInterface.jl/blob/v1.34.0/docs/src/index.md\"","category":"page"},{"location":"moi/#moi_documentation","page":"Introduction","title":"Introduction","text":"","category":"section"},{"location":"moi/","page":"Introduction","title":"Introduction","text":"warning: Warning\nThis documentation in this section is a copy of the official MathOptInterface documentation available at https://jump.dev/MathOptInterface.jl/v1.34.0. It is included here to make it easier to link concepts between JuMP and MathOptInterface.","category":"page"},{"location":"moi/#What-is-MathOptInterface?","page":"Introduction","title":"What is MathOptInterface?","text":"","category":"section"},{"location":"moi/","page":"Introduction","title":"Introduction","text":"MathOptInterface.jl (MOI) is an abstraction layer designed to provide a unified interface to mathematical optimization solvers so that users do not need to understand multiple solver-specific APIs.","category":"page"},{"location":"moi/","page":"Introduction","title":"Introduction","text":"tip: Tip\nThis documentation is aimed at developers writing software interfaces to solvers and modeling languages using the MathOptInterface API. 
If you are a user interested in solving optimization problems, we encourage you instead to use MOI through a higher-level modeling interface like JuMP or Convex.jl.","category":"page"},{"location":"moi/#How-the-documentation-is-structured","page":"Introduction","title":"How the documentation is structured","text":"","category":"section"},{"location":"moi/","page":"Introduction","title":"Introduction","text":"Having a high-level overview of how this documentation is structured will help you know where to look for certain things.","category":"page"},{"location":"moi/","page":"Introduction","title":"Introduction","text":"The Tutorials section contains articles on how to use and implement the MathOptInteraface API. Look here if you want to write a model in MOI, or write an interface to a new solver.\nThe Manual contains short code-snippets that explain how to use the MOI API. Look here for more details on particular areas of MOI.\nThe Background section contains articles on the theory behind MathOptInterface. Look here if you want to understand why, rather than how.\nThe API Reference contains a complete list of functions and types that comprise the MOI API. Look here if you want to know how to use (or implement) a particular function.\nThe Submodules section contains stand-alone documentation for each of the submodules within MOI. These submodules are not required to interface a solver with MOI, but they make the job much easier.","category":"page"},{"location":"moi/#Citing-MathOptInterface","page":"Introduction","title":"Citing MathOptInterface","text":"","category":"section"},{"location":"moi/","page":"Introduction","title":"Introduction","text":"If you find MathOptInterface useful in your work, we kindly request that you cite the following paper:","category":"page"},{"location":"moi/","page":"Introduction","title":"Introduction","text":"@article{legat2021mathoptinterface,\n title={{MathOptInterface}: a data structure for mathematical optimization problems},\n author={Legat, Beno{\\^\\i}t and Dowson, Oscar and Garcia, Joaquim Dias and Lubin, Miles},\n journal={INFORMS Journal on Computing},\n year={2021},\n doi={10.1287/ijoc.2021.1067},\n publisher={INFORMS}\n}","category":"page"},{"location":"moi/","page":"Introduction","title":"Introduction","text":"A preprint of this paper is freely available.","category":"page"},{"location":"packages/OSQP/","page":"osqp/OSQP.jl","title":"osqp/OSQP.jl","text":"EditURL = \"https://github.com/osqp/OSQP.jl/blob/v0.8.1/README.md\"","category":"page"},{"location":"packages/OSQP/#OSQP.jl","page":"osqp/OSQP.jl","title":"OSQP.jl","text":"","category":"section"},{"location":"packages/OSQP/","page":"osqp/OSQP.jl","title":"osqp/OSQP.jl","text":"(Image: Build Status) (Image: codecov.io)","category":"page"},{"location":"packages/OSQP/","page":"osqp/OSQP.jl","title":"osqp/OSQP.jl","text":"OSQP.jl is a Julia wrapper for OSQP: the Operator Splitting QP Solver.","category":"page"},{"location":"packages/OSQP/#License","page":"osqp/OSQP.jl","title":"License","text":"","category":"section"},{"location":"packages/OSQP/","page":"osqp/OSQP.jl","title":"osqp/OSQP.jl","text":"OSQP.jl is licensed under the Apache-2.0 license.","category":"page"},{"location":"packages/OSQP/","page":"osqp/OSQP.jl","title":"osqp/OSQP.jl","text":"The upstream solver, osqp/osqp is also licensed under the Apache-2.0 
license.","category":"page"},{"location":"packages/OSQP/#Installation","page":"osqp/OSQP.jl","title":"Installation","text":"","category":"section"},{"location":"packages/OSQP/","page":"osqp/OSQP.jl","title":"osqp/OSQP.jl","text":"Install OSQP.jl using the Julia package manager","category":"page"},{"location":"packages/OSQP/","page":"osqp/OSQP.jl","title":"osqp/OSQP.jl","text":"import Pkg\nPkg.add(\"OSQP\")","category":"page"},{"location":"packages/OSQP/#Problem-class","page":"osqp/OSQP.jl","title":"Problem class","text":"","category":"section"},{"location":"packages/OSQP/","page":"osqp/OSQP.jl","title":"osqp/OSQP.jl","text":"The OSQP (Operator Splitting Quadratic Program) solver is a numerical optimization package for solving problems in the form","category":"page"},{"location":"packages/OSQP/","page":"osqp/OSQP.jl","title":"osqp/OSQP.jl","text":"minimize 0.5 x' P x + q' x\n\nsubject to l <= A x <= u","category":"page"},{"location":"packages/OSQP/","page":"osqp/OSQP.jl","title":"osqp/OSQP.jl","text":"where x in R^n is the optimization variable. The objective function is defined by a positive semidefinite matrix P in S^n_+ and vector q in R^n. The linear constraints are defined by matrix A in R^{m x n} and vectors l in R^m U {-inf}^m, u in R^m U {+inf}^m.","category":"page"},{"location":"packages/OSQP/#Documentation","page":"osqp/OSQP.jl","title":"Documentation","text":"","category":"section"},{"location":"packages/OSQP/","page":"osqp/OSQP.jl","title":"osqp/OSQP.jl","text":"Detailed documentation is available at https://osqp.org/.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"EditURL = \"classifiers.jl\"","category":"page"},{"location":"tutorials/nonlinear/classifiers/#Example:-classification-problems","page":"Example: classification problems","title":"Example: classification problems","text":"","category":"section"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"The purpose of this tutorial is to show how JuMP can be used to formulate classification problems.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Classification problems deal with constructing functions, called classifiers, that can efficiently classify data into two or more distinct sets. 
A common application is classifying previously unseen data points after training a classifier on known data.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"The theory and models in this tutorial come from Section 9.4 of (Ferris et al., 2007).","category":"page"},{"location":"tutorials/nonlinear/classifiers/#Required-packages","page":"Example: classification problems","title":"Required packages","text":"","category":"section"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"This tutorial uses the following packages:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"using JuMP\nimport DelimitedFiles\nimport Ipopt\nimport LinearAlgebra\nimport Plots\nimport Random\nimport Test","category":"page"},{"location":"tutorials/nonlinear/classifiers/#Data-and-visualisation","page":"Example: classification problems","title":"Data and visualisation","text":"","category":"section"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"To start, let's generate some points to test with. The argument m is the number of 2-dimensional points:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"function generate_test_points(m; random_seed = 1)\n rng = Random.MersenneTwister(random_seed)\n return 2.0 .* rand(rng, Float64, m, 2)\nend","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"For the sake of the example, let's take m = 100:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"P = generate_test_points(100);\nnothing #hide","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"The points are represented row-wise in the matrix P. Let's visualise the points using the Plots package:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"plot = Plots.scatter(\n P[:, 1],\n P[:, 2];\n xlim = (0, 2.02),\n ylim = (0, 2.02),\n color = :white,\n size = (600, 600),\n legend = false,\n)","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"We want to split the points into two distinct sets on either side of a dividing line. We'll then label each point depending on which side of the line it happens to fall. Based on the labels of the point, we'll show how to create a classifier using a JuMP model. 
We can then test how well our classifier reproduces the original labels and the boundary between them.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Let's make a line to divide the points into two sets by defining a gradient and a constant:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"w_0, g_0 = [5, 3], 8\nline(v::AbstractArray; w = w_0, g = g_0) = w' * v - g\nline(x::Real; w = w_0, g = g_0) = -(w[1] * x - g) / w[2];\nnothing #hide","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Julia's multiple dispatch feature allows us to define the vector and single-variable form of the line function under the same name.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Let's add this to the plot:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Plots.plot!(plot, line; linewidth = 5)","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Now we label the points relative to which side of the line they are. It is numerically useful to have the labels +1 and -1 for the upcoming JuMP formulation.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"labels = ifelse.(line.(eachrow(P)) .>= 0, 1, -1)\nPlots.scatter!(\n plot,\n P[:, 1],\n P[:, 2];\n shape = ifelse.(labels .== 1, :cross, :xcross),\n markercolor = ifelse.(labels .== 1, :blue, :crimson),\n markersize = 8,\n)","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Our goal is to show we can reconstruct the line from just the points and the labels.","category":"page"},{"location":"tutorials/nonlinear/classifiers/#Formulation:-linear-support-vector-machine","page":"Example: classification problems","title":"Formulation: linear support vector machine","text":"","category":"section"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"A classifier known as the linear support vector machine (SVM) looks for the affine function L(p) = w^top p - g that satisfies L(p) < 0 for all points p with a label -1 and L(p) ge 0 for all points p with a label +1.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"The linearly constrained quadratic program that implements this is:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"beginaligned\nmin_w in mathbbR^n g in mathbbR y in mathbbR^m quad frac12 w^top w + C sum_i=1^m y_i \ntextsubject to quad D cdot (P w - g) + y geq mathbf1 \n y ge 0\nendaligned","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification 
problems","title":"Example: classification problems","text":"where D is a diagonal matrix of the labels.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"We need a default value for the positive penalty parameter C:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"C_0 = 100.0;\nnothing #hide","category":"page"},{"location":"tutorials/nonlinear/classifiers/#JuMP-formulation","page":"Example: classification problems","title":"JuMP formulation","text":"","category":"section"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Here is the JuMP model:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"function solve_SVM_classifier(P::Matrix, labels::Vector; C::Float64 = C_0)\n m, n = size(P)\n model = Model(Ipopt.Optimizer)\n set_silent(model)\n @variable(model, w[1:n])\n @variable(model, g)\n @variable(model, y[1:m] >= 0)\n @objective(model, Min, 1 / 2 * w' * w + C * sum(y))\n D = LinearAlgebra.Diagonal(labels)\n @constraint(model, D * (P * w .- g) .+ y .>= 1)\n optimize!(model)\n Test.@test is_solved_and_feasible(model)\n slack = extrema(value.(y))\n println(\"Minimum slack: \", slack[1], \"\\nMaximum slack: \", slack[2])\n classifier(x) = line(x; w = value.(w), g = value(g))\n return model, classifier\nend","category":"page"},{"location":"tutorials/nonlinear/classifiers/#Results","page":"Example: classification problems","title":"Results","text":"","category":"section"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Let's recover the values that define the classifier by solving the model:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"_, classifier = solve_SVM_classifier(P, labels)","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"With the solution, we can ask: was the value of the penalty constant \"sufficiently large\" for this data set? This can be judged in part by the range of the slack variables. 
If the slack is too large, then we need to increase the penalty constant.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Let's plot the solution and check how we did:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Plots.plot!(plot, classifier; linewidth = 5, linestyle = :dashdotdot)","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"We find that we have recovered the dividing line from just the information of the points and their labels.","category":"page"},{"location":"tutorials/nonlinear/classifiers/#Nonseparable-classes-of-points","page":"Example: classification problems","title":"Nonseparable classes of points","text":"","category":"section"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Now, what if the point sets are not cleanly separable by a line (or a hyperplane in higher dimensions)? Does this still work? Let's repeat the process, but this time we will simulate nonseparable classes of points by intermingling a few nearby points across the previously used line.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"nearby_indices = abs.(line.(eachrow(P))) .< 1.1\nlabels_new = ifelse.(nearby_indices, -labels, labels)\nmodel, classifier = solve_SVM_classifier(P, labels_new)\nplot = Plots.scatter(\n P[:, 1],\n P[:, 2];\n xlim = (0, 2.02),\n ylim = (0, 2.02),\n color = :white,\n size = (600, 600),\n legend = false,\n)\nPlots.scatter!(\n plot,\n P[:, 1],\n P[:, 2];\n shape = ifelse.(labels_new .== 1, :cross, :xcross),\n markercolor = ifelse.(labels_new .== 1, :blue, :crimson),\n markersize = 8,\n)\nPlots.plot!(plot, classifier; linewidth = 5, linestyle = :dashdotdot)","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"So our JuMP formulation still produces a classifier, but it mis-classifies some of the nonseparable points.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"We can find out which points are contributing to the shape of the line by looking at the dual values of the affine constraints and comparing them to the penalty constant C:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"affine_cons = all_constraints(model, AffExpr, MOI.GreaterThan{Float64})\nactive_cons = findall(isapprox.(dual.(affine_cons), C_0; atol = 0.001))\nfindall(nearby_indices) ⊆ active_cons","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"The last statement tells us that our nonseparable points are actively contributing to how the classifier is defined. 
The remaining points are of interest and are highlighted:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"P_active = P[setdiff(active_cons, findall(nearby_indices)), :]\nPlots.scatter!(\n plot,\n P_active[:, 1],\n P_active[:, 2];\n shape = :hexagon,\n markersize = 8,\n markeropacity = 0.5,\n)","category":"page"},{"location":"tutorials/nonlinear/classifiers/#Advanced:-duality-and-the-kernel-method","page":"Example: classification problems","title":"Advanced: duality and the kernel method","text":"","category":"section"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"We now consider an alternative formulation for a linear SVM by solving the dual problem.","category":"page"},{"location":"tutorials/nonlinear/classifiers/#The-dual-program","page":"Example: classification problems","title":"The dual program","text":"","category":"section"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"The dual of the linear SVM program is also a linearly constrained quadratic program:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"beginaligned\nmin_u in mathbbR^m quad frac12 u^top D P P^top D u - mathbf1^top u \ntextsubject to quad mathbf1^top D u = 0 \n 0 leq u leq Cmathbf1\nendaligned","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"This is the JuMP model:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"function solve_dual_SVM_classifier(P::Matrix, labels::Vector; C::Float64 = C_0)\n m, n = size(P)\n model = Model(Ipopt.Optimizer)\n set_silent(model)\n @variable(model, 0 <= u[1:m] <= C)\n D = LinearAlgebra.Diagonal(labels)\n @objective(model, Min, 1 / 2 * u' * D * P * P' * D * u - sum(u))\n @constraint(model, con, sum(D * u) == 0)\n optimize!(model)\n Test.@test is_solved_and_feasible(model)\n w = P' * D * value.(u)\n g = dual(con)\n classifier(x) = line(x; w = w, g = g)\n return classifier\nend","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"We recover the line gradient vector w through setting w = P^top D u, and the line constant g as the dual value of the single affine constraint.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"The dual problem has fewer variables and fewer constraints, so in many cases it may be simpler to solve the dual form.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"We can check that the dual form has recovered a classifier:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"classifier = solve_dual_SVM_classifier(P, labels)\nPlots.plot!(plot, classifier; linewidth = 5, linestyle = 
:dash)","category":"page"},{"location":"tutorials/nonlinear/classifiers/#The-kernel-method","page":"Example: classification problems","title":"The kernel method","text":"","category":"section"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Linear SVM techniques are not limited to finding separating hyperplanes in the original space of the dataset. One could first transform the training data under a nonlinear mapping, apply our method, then map the hyperplane back into original space.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"The actual data describing the point set is held in a matrix P, but looking at the dual program we see that what actually matters is the Gram matrix P P^top, expressing a pairwise comparison (an inner-product) between each point vector. It follows that any mapping of the point set only needs to be defined at the level of pairwise maps between points. Such maps are known as kernel functions:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"k mathbbR^n times mathbbR^n rightarrow mathbbR qquad\n(s t) mapsto left Phi(s) Phi(t) right","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"where the right-hand side applies some transformation Phi mathbbR^n rightarrow mathbbR^n followed by an inner-product in that image space.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"In practice, we can avoid having Phi explicitly given but instead define a kernel function directly between pairs of vectors. 
This change to using a kernel function without knowing the map is called the kernel method (or sometimes, the kernel trick).","category":"page"},{"location":"tutorials/nonlinear/classifiers/#Classifier-using-a-Gaussian-kernel","page":"Example: classification problems","title":"Classifier using a Gaussian kernel","text":"","category":"section"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"We will demonstrate the application of a Gaussian or radial basis function kernel:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"k(s t) = expleft( -mu lVert s - t rVert^2_2 right)","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"for some positive parameter mu.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"k_gauss(s::Vector, t::Vector; μ = 0.5) = exp(-μ * LinearAlgebra.norm(s - t)^2)","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Given a matrix of points expressed row-wise and a kernel, the next function returns the transformed matrix K that replaces P P^top:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"function pairwise_transform(kernel::Function, P::Matrix{T}) where {T}\n m, n = size(P)\n K = zeros(T, m, m)\n for j in 1:m, i in 1:j\n K[i, j] = K[j, i] = kernel(P[i, :], P[j, :])\n end\n return LinearAlgebra.Symmetric(K)\nend","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Now we're ready to define our optimization problem. We need to provide the kernel function to be used in the problem. Note that any extra keyword arguments here (like parameter values) are passed through to the kernel.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"function solve_kernel_SVM_classifier(\n kernel::Function,\n P::Matrix,\n labels::Vector;\n C::Float64 = C_0,\n kwargs...,\n)\n m, n = size(P)\n K = pairwise_transform(kernel, P)\n model = Model(Ipopt.Optimizer)\n set_silent(model)\n @variable(model, 0 <= u[1:m] <= C)\n D = LinearAlgebra.Diagonal(labels)\n con = @constraint(model, sum(D * u) == 0)\n @objective(model, Min, 1 / 2 * u' * D * K * D * u - sum(u))\n optimize!(model)\n Test.@test is_solved_and_feasible(model)\n u_sol, g_sol = value.(u), dual(con)\n function classifier(v::Vector)\n return sum(\n D[i, i] * u_sol[i] * kernel(P[i, :], v; kwargs...) for i in 1:m\n ) - g_sol\n end\n return classifier\nend","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"This time, we don't recover the line gradient vector w directly. 
Instead, we compute the classifier f using the function:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":" f(v) = sum_i=1^m D_ii u_i k(p_i v ) - g","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"where p_i is row vector i of P.","category":"page"},{"location":"tutorials/nonlinear/classifiers/#Checkerboard-dataset","page":"Example: classification problems","title":"Checkerboard dataset","text":"","category":"section"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"To demonstrate this nonlinear technique, we'll use the checkerboard dataset.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"filename = joinpath(@__DIR__, \"data\", \"checker\", \"checker.txt\")\ncheckerboard = DelimitedFiles.readdlm(filename, ' ', Int)\nlabels = ifelse.(iszero.(checkerboard[:, 1]), -1, 1)\nB = checkerboard[:, 2:3] ./ 100.0 # rescale to [0,2] x [0,2] square.\nplot = Plots.scatter(\n B[:, 1],\n B[:, 2];\n color = ifelse.(labels .== 1, :white, :black),\n markersize = ifelse.(labels .== 1, 4, 2),\n size = (600, 600),\n legend = false,\n)","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"Is the technique capable of generating a distinctly nonlinear surface? Let's solve the Gaussian kernel based quadratic problem with these parameters:","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"classifier = solve_kernel_SVM_classifier(k_gauss, B, labels; C = 1e5, μ = 10.0)\ngrid = [[x, y] for x in 0:0.01:2, y in 0:0.01:2]\ngrid_pos = [Tuple(g) for g in grid if classifier(g) >= 0]\nPlots.scatter!(plot, grid_pos; markersize = 0.2)","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"We find that the kernel method can perform well as a nonlinear classifier.","category":"page"},{"location":"tutorials/nonlinear/classifiers/","page":"Example: classification problems","title":"Example: classification problems","text":"The result has a fairly strong dependence on the choice of parameters, with larger values of mu allowing for a more complex boundary while smaller values lead to a smoother boundary for the classifier. 
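As a rough, hedged sketch of that parameter dependence (not part of the original tutorial; the helper name training_accuracy is hypothetical), one could re-solve the kernel problem for a few values of mu and compare how well each classifier reproduces the training labels. Note that this only measures fit on the training data, not generalisation.

```julia
# Sketch: reuse solve_kernel_SVM_classifier, k_gauss, B, and labels from above,
# and report the fraction of training points that each classifier labels correctly.
function training_accuracy(classifier, P::Matrix, labels::Vector)
    predicted = [classifier(P[i, :]) >= 0 ? 1 : -1 for i in 1:size(P, 1)]
    return sum(predicted .== labels) / length(labels)
end

for μ in (1.0, 10.0, 100.0)
    c = solve_kernel_SVM_classifier(k_gauss, B, labels; C = 1e5, μ = μ)
    println("μ = ", μ, ": training accuracy = ", training_accuracy(c, B, labels))
end
```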
Determining a better performing kernel function and choice of parameters is covered by the process of cross-validation with respect to the dataset, where different testing, training and tuning sets are used to validate the best choice of parameters against a statistical measure of error.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"CurrentModule = JuMP\nDocTestSetup = quote\n using JuMP, HiGHS, SCS\nend\nDocTestFilters = [r\"≤|<=\", r\"≥|>=\", r\" == | = \", r\" ∈ | in \", r\"MathOptInterface|MOI\"]","category":"page"},{"location":"manual/models/#jump_models","page":"Models","title":"Models","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"JuMP models are the fundamental building block that we use to construct optimization problems. They hold things like the variables and constraints, as well as which solver to use and even solution information.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"info: Info\nJuMP uses \"optimizer\" as a synonym for \"solver.\" Our convention is to use \"solver\" to refer to the underlying software, and use \"optimizer\" to refer to the Julia object that wraps the solver. For example, HiGHS is a solver, and HiGHS.Optimizer is an optimizer.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"tip: Tip\nSee Supported solvers for a list of available solvers.","category":"page"},{"location":"manual/models/#Create-a-model","page":"Models","title":"Create a model","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"Create a model by passing an optimizer to Model:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = Model(HiGHS.Optimizer)\nA JuMP Model\n├ solver: HiGHS\n├ objective_sense: FEASIBILITY_SENSE\n├ num_variables: 0\n├ num_constraints: 0\n└ Names registered in the model: none","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"If you don't know which optimizer you will be using at creation time, create a model without an optimizer, and then call set_optimizer at any time prior to optimize!:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = Model()\nA JuMP Model\n├ solver: none\n├ objective_sense: FEASIBILITY_SENSE\n├ num_variables: 0\n├ num_constraints: 0\n└ Names registered in the model: none\n\njulia> set_optimizer(model, HiGHS.Optimizer)","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"tip: Tip\nDon't know what the fields Model mode and CachingOptimizer state mean? 
Read the Backends section.","category":"page"},{"location":"manual/models/#What-is-the-difference?","page":"Models","title":"What is the difference?","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"For most models, there is no difference between passing the optimizer to Model, and calling set_optimizer.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"However, if an optimizer does not support a constraint in the model, the timing of when an error will be thrown can differ:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"If you pass an optimizer, an error will be thrown when you try to add the constraint.\nIf you call set_optimizer, an error will be thrown when you try to solve the model via optimize!.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"Therefore, most users should pass an optimizer to Model because it provides the earliest warning that your solver is not suitable for the model you are trying to build. However, if you are modifying a problem by adding and deleting different constraint types, you may need to use set_optimizer. See Switching optimizer for the relaxed problem for an example of when this is useful.","category":"page"},{"location":"manual/models/#Reducing-time-to-first-solve-latency","page":"Models","title":"Reducing time-to-first-solve latency","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"By default, JuMP uses bridges to reformulate the model you are building into an equivalent model supported by the solver.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"However, if your model is already supported by the solver, bridges add latency (read The \"time-to-first-solve\" issue). This is particularly noticeable for small models.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"To reduce the \"time-to-first-solve,\" try passing add_bridges = false.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = Model(HiGHS.Optimizer; add_bridges = false);","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"or","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = Model();\n\njulia> set_optimizer(model, HiGHS.Optimizer; add_bridges = false)","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"However, be wary. If your model and solver combination needs bridges, an error will be thrown:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = Model(SCS.Optimizer; add_bridges = false);\n\n\njulia> @variable(model, x)\nx\n\njulia> @constraint(model, 2x <= 1)\nERROR: Constraints of type MathOptInterface.ScalarAffineFunction{Float64}-in-MathOptInterface.LessThan{Float64} are not supported by the solver.\n\nIf you expected the solver to support your problem, you may have an error in your formulation. 
Otherwise, consider using a different solver.\n\nThe list of available solvers, along with the problem types they support, is available at https://jump.dev/JuMP.jl/stable/installation/#Supported-solvers.\n[...]","category":"page"},{"location":"manual/models/#Solvers-which-expect-environments","page":"Models","title":"Solvers which expect environments","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"Some solvers accept (or require) positional arguments such as a license environment or a path to a binary executable. For these solvers, you can pass a function to Model which takes zero arguments and returns an instance of the optimizer.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"A common use-case for this is passing an environment or sub-solver to the optimizer:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> import HiGHS\n\njulia> import MultiObjectiveAlgorithms as MOA\n\njulia> model = Model(() -> MOA.Optimizer(HiGHS.Optimizer))\nA JuMP Model\n├ solver: MOA[algorithm=MultiObjectiveAlgorithms.Lexicographic, optimizer=HiGHS]\n├ objective_sense: FEASIBILITY_SENSE\n├ num_variables: 0\n├ num_constraints: 0\n└ Names registered in the model: none","category":"page"},{"location":"manual/models/#solver_options","page":"Models","title":"Solver options","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"JuMP uses \"attribute\" as a synonym for \"option.\" Use optimizer_with_attributes to create an optimizer with some attributes initialized:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = Model(\n optimizer_with_attributes(HiGHS.Optimizer, \"output_flag\" => false),\n )\nA JuMP Model\n├ solver: HiGHS\n├ objective_sense: FEASIBILITY_SENSE\n├ num_variables: 0\n├ num_constraints: 0\n└ Names registered in the model: none","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"Alternatively, use set_attribute to set an attribute after the model has been created:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = Model(HiGHS.Optimizer);\n\njulia> set_attribute(model, \"output_flag\", false)\n\njulia> get_attribute(model, \"output_flag\")\nfalse","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"You can also modify attributes within an optimizer_with_attributes object:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> solver = optimizer_with_attributes(HiGHS.Optimizer, \"output_flag\" => true);\n\njulia> get_attribute(solver, \"output_flag\")\ntrue\n\njulia> set_attribute(solver, \"output_flag\", false)\n\njulia> get_attribute(solver, \"output_flag\")\nfalse\n\njulia> model = Model(solver);","category":"page"},{"location":"manual/models/#Changing-the-number-types","page":"Models","title":"Changing the number types","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"By default, the coefficients of affine and quadratic expressions are numbers of type either Float64 or Complex{Float64} (see Complex number support).","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"The type Float64 can be changed using the GenericModel 
constructor:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = GenericModel{Rational{BigInt}}();\n\njulia> @variable(model, x)\nx\n\njulia> @expression(model, expr, 1 // 3 * x)\n1//3 x\n\njulia> typeof(expr)\nGenericAffExpr{Rational{BigInt}, GenericVariableRef{Rational{BigInt}}}","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"Using a value_type other than Float64 is an advanced operation and should be used only if the underlying solver actually solves the problem using the provided value type.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"warning: Warning\nNonlinear Modeling is currently restricted to the Float64 number type.","category":"page"},{"location":"manual/models/#Print-the-model","page":"Models","title":"Print the model","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"By default, show(model) will print a summary of the problem:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = Model(); @variable(model, x >= 0); @objective(model, Max, x);\n\njulia> model\nA JuMP Model\n├ solver: none\n├ objective_sense: MAX_SENSE\n│ └ objective_function_type: VariableRef\n├ num_variables: 1\n├ num_constraints: 1\n│ └ VariableRef in MOI.GreaterThan{Float64}: 1\n└ Names registered in the model\n └ :x","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"Use print to print the formulation of the model (in IJulia, this will render as LaTeX).","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> print(model)\nMax x\nSubject to\n x ≥ 0","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"warning: Warning\nThis format is specific to JuMP and may change in any future release. It is not intended to be an instance format. To write the model to a file, use write_to_file instead.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"Use latex_formulation to display the model in LaTeX form.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> latex_formulation(model)\n$$ \\begin{aligned}\n\\max\\quad & x\\\\\n\\text{Subject to} \\quad & x \\geq 0\\\\\n\\end{aligned} $$","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"In IJulia (and Documenter), ending a cell with latex_formulation will render the model in LaTeX:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"using JuMP # hide\nmodel = Model() # hide\n@variable(model, x >= 0) # hide\n@objective(model, Max, x) # hide\nlatex_formulation(model)","category":"page"},{"location":"manual/models/#Turn-off-output","page":"Models","title":"Turn off output","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"Use set_silent and unset_silent to disable or enable printing output from the solver.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = Model(HiGHS.Optimizer);\n\njulia> set_silent(model)\n\njulia> unset_silent(model)","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"tip: Tip\nMost solvers will also have a solver-specific option to provide finer-grained control over the output. 
Consult their READMEs for details.","category":"page"},{"location":"manual/models/#Set-a-time-limit","page":"Models","title":"Set a time limit","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"Use set_time_limit_sec, unset_time_limit_sec, and time_limit_sec to manage time limits.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = Model(HiGHS.Optimizer);\n\njulia> set_time_limit_sec(model, 60.0)\n\n\njulia> time_limit_sec(model)\n60.0\n\njulia> unset_time_limit_sec(model)\n\njulia> limit = time_limit_sec(model)\n\njulia> limit === nothing\ntrue","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"If your time limit is encoded as a Dates.Period object, use the following code to convert it to Float64 for set_time_limit_sec:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> import Dates\n\njulia> seconds(x::Dates.Period) = 1e-3 * Dates.value(round(x, Dates.Millisecond))\nseconds (generic function with 1 method)\n\njulia> set_time_limit_sec(model, seconds(Dates.Hour(1)))\n\njulia> time_limit_sec(model)\n3600.0","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"info: Info\nSome solvers do not support time limits. In these cases, an error will be thrown.","category":"page"},{"location":"manual/models/#Write-a-model-to-file","page":"Models","title":"Write a model to file","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"JuMP can write models to a variety of file-formats using write_to_file and Base.write.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"For most common file formats, the file type will be detected from the extension.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"For example, here is how to write an MPS file:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = Model();\n\njulia> write_to_file(model, \"model.mps\")","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"Other supported file formats include:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":".cbf for the Conic Benchmark Format\n.lp for the LP file format\n.mof.json for the MathOptFormat\n.nl for AMPL's NL file format\n.rew for the REW file format\n.sdpa and .dat-s for the SDPA file format","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"To write to a specific io::IO, use Base.write. 
Specify the file type by passing a MOI.FileFormats.FileFormat enum.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = Model();\n\njulia> io = IOBuffer();\n\njulia> write(io, model; format = MOI.FileFormats.FORMAT_MPS)","category":"page"},{"location":"manual/models/#Read-a-model-from-file","page":"Models","title":"Read a model from file","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"JuMP models can be created from file formats using read_from_file and Base.read.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = read_from_file(\"model.mps\")\nA JuMP Model\n├ solver: none\n├ objective_sense: MIN_SENSE\n│ └ objective_function_type: AffExpr\n├ num_variables: 0\n├ num_constraints: 0\n└ Names registered in the model: none\n\njulia> seekstart(io);\n\njulia> model2 = read(io, Model; format = MOI.FileFormats.FORMAT_MPS)\nA JuMP Model\n├ solver: none\n├ objective_sense: MIN_SENSE\n│ └ objective_function_type: AffExpr\n├ num_variables: 0\n├ num_constraints: 0\n└ Names registered in the model: none","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"note: Note\nBecause file formats do not serialize the containers of JuMP variables and constraints, the names in the model will not be registered. Therefore, you cannot access named variables and constraints via model[:x]. Instead, use variable_by_name or constraint_by_name to access specific variables or constraints.","category":"page"},{"location":"manual/models/#Relax-integrality","page":"Models","title":"Relax integrality","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"Use relax_integrality to remove any integrality constraints from the model, such as integer and binary restrictions on variables. relax_integrality returns a function that can be later called with zero arguments to re-add the removed constraints:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = Model();\n\njulia> @variable(model, x, Int)\nx\n\njulia> num_constraints(model, VariableRef, MOI.Integer)\n1\n\njulia> undo = relax_integrality(model);\n\njulia> num_constraints(model, VariableRef, MOI.Integer)\n0\n\njulia> undo()\n\njulia> num_constraints(model, VariableRef, MOI.Integer)\n1","category":"page"},{"location":"manual/models/#Switching-optimizer-for-the-relaxed-problem","page":"Models","title":"Switching optimizer for the relaxed problem","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"A common reason for relaxing integrality is to compute dual variables of the relaxed problem. 
However, some mixed-integer linear solvers (for example, Cbc) do not return dual solutions, even if the problem does not have integrality restrictions.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"Therefore, after relax_integrality you should call set_optimizer with a solver that does support dual solutions, such as Clp.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"For example, instead of:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"using JuMP, Cbc\nmodel = Model(Cbc.Optimizer)\n@variable(model, x, Int)\nundo = relax_integrality(model)\noptimize!(model)\nreduced_cost(x) # Errors","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"do:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"using JuMP, Cbc, Clp\nmodel = Model(Cbc.Optimizer)\n@variable(model, x, Int)\nundo = relax_integrality(model)\nset_optimizer(model, Clp.Optimizer)\noptimize!(model)\nreduced_cost(x) # Works","category":"page"},{"location":"manual/models/#Get-the-matrix-representation","page":"Models","title":"Get the matrix representation","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"Use lp_matrix_data to return a data structure that represents the matrix form of a linear program.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> begin\n model = Model()\n @variable(model, x >= 1, Bin)\n @variable(model, 2 <= y)\n @variable(model, 3 <= z <= 4, Int)\n @constraint(model, x == 5)\n @constraint(model, 2x + 3y <= 6)\n @constraint(model, -4y >= 5z + 7)\n @constraint(model, -1 <= x + y <= 2)\n @objective(model, Max, 1 + 2x)\n end;\n\njulia> data = lp_matrix_data(model);\n\njulia> data.A\n4×3 SparseArrays.SparseMatrixCSC{Float64, Int64} with 7 stored entries:\n 1.0 ⋅ ⋅\n ⋅ -4.0 -5.0\n 2.0 3.0 ⋅\n 1.0 1.0 ⋅\n\njulia> data.b_lower\n4-element Vector{Float64}:\n 5.0\n 7.0\n -Inf\n -1.0\n\njulia> data.b_upper\n4-element Vector{Float64}:\n 5.0\n Inf\n 6.0\n 2.0\n\njulia> data.x_lower\n3-element Vector{Float64}:\n 1.0\n 2.0\n 3.0\n\njulia> data.x_upper\n3-element Vector{Float64}:\n Inf\n Inf\n 4.0\n\njulia> data.c\n3-element Vector{Float64}:\n 2.0\n 0.0\n 0.0\n\njulia> data.c_offset\n1.0\n\njulia> data.sense\nMAX_SENSE::OptimizationSense = 1\n\njulia> data.integers\n1-element Vector{Int64}:\n 3\n\njulia> data.binaries\n1-element Vector{Int64}:\n 1","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"warning: Warning\nlp_matrix_data is intentionally limited in the types of problems that it supports and the structure of the matrices it outputs. It is mainly intended as a pedagogical and debugging tool. It should not be used to interface solvers, see Implementing a solver interface instead.","category":"page"},{"location":"manual/models/#Backends","page":"Models","title":"Backends","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"info: Info\nThis section discusses advanced features of JuMP. For new users, you may want to skip this section. 
You don't need to know how JuMP manages problems behind the scenes to create and solve JuMP models.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"A JuMP Model is a thin layer around a backend of type MOI.ModelLike that stores the optimization problem and acts as the optimization solver.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"However, if you construct a model like Model(HiGHS.Optimizer), the backend is not a HiGHS.Optimizer, but a more complicated object.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"From JuMP, the MOI backend can be accessed using the backend function. Let's see what the backend of a JuMP Model is:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = Model(HiGHS.Optimizer);\n\njulia> b = backend(model)\nMOIU.CachingOptimizer\n├ state: EMPTY_OPTIMIZER\n├ mode: AUTOMATIC\n├ model_cache: MOIU.UniversalFallback{MOIU.Model{Float64}}\n│ ├ ObjectiveSense: FEASIBILITY_SENSE\n│ ├ ObjectiveFunctionType: MOI.ScalarAffineFunction{Float64}\n│ ├ NumberOfVariables: 0\n│ └ NumberOfConstraints: 0\n└ optimizer: MOIB.LazyBridgeOptimizer{HiGHS.Optimizer}\n ├ Variable bridges: none\n ├ Constraint bridges: none\n ├ Objective bridges: none\n └ model: A HiGHS model with 0 columns and 0 rows.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"Uh oh. Even though we passed a HiGHS.Optimizer, the backend is a much more complicated object.","category":"page"},{"location":"manual/models/#CachingOptimizer","page":"Models","title":"CachingOptimizer","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"A MOIU.CachingOptimizer is a layer that abstracts the difference between solvers that support incremental modification (for example, they support adding variables one-by-one), and solvers that require the entire problem in a single API call (for example, they only accept the A, b and c matrices of a linear program).","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"It has two parts:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"A cache, where the model can be built and modified incrementally\njulia> b.model_cache\nMOIU.UniversalFallback{MOIU.Model{Float64}}\n├ ObjectiveSense: FEASIBILITY_SENSE\n├ ObjectiveFunctionType: MOI.ScalarAffineFunction{Float64}\n├ NumberOfVariables: 0\n└ NumberOfConstraints: 0\nAn optimizer, which is used to solve the problem\njulia> b.optimizer\nMOIB.LazyBridgeOptimizer{HiGHS.Optimizer}\n├ Variable bridges: none\n├ Constraint bridges: none\n├ Objective bridges: none\n└ model: A HiGHS model with 0 columns and 0 rows.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"info: Info\nThe LazyBridgeOptimizer section explains what a LazyBridgeOptimizer is.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"The CachingOptimizer has logic to decide when to copy the problem from the cache to the optimizer, and when it can efficiently update the optimizer in-place.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"A CachingOptimizer may be in one of three possible states:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"NO_OPTIMIZER: The CachingOptimizer does not have any 
optimizer.\nEMPTY_OPTIMIZER: The CachingOptimizer has an empty optimizer, and it is not synchronized with the cached model.\nATTACHED_OPTIMIZER: The CachingOptimizer has an optimizer, and it is synchronized with the cached model.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"A CachingOptimizer has two modes of operation:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"AUTOMATIC: The CachingOptimizer changes its state when necessary. For example, optimize! will automatically call attach_optimizer (an optimizer must have been previously set). Attempting to add a constraint or perform a modification not supported by the optimizer results in a drop to EMPTY_OPTIMIZER mode.\nMANUAL: The user must change the state of the CachingOptimizer using MOIU.reset_optimizer(::JuMP.Model), MOIU.drop_optimizer(::JuMP.Model), and MOIU.attach_optimizer(::JuMP.Model). Attempting to perform an operation in the incorrect state results in an error.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"By default Model will create a CachingOptimizer in AUTOMATIC mode.","category":"page"},{"location":"manual/models/#LazyBridgeOptimizer","page":"Models","title":"LazyBridgeOptimizer","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"The second layer that JuMP applies automatically is a MOI.Bridges.LazyBridgeOptimizer. A MOI.Bridges.LazyBridgeOptimizer is an MOI layer that attempts to transform the problem from the formulation provided by the user into an equivalent problem supported by the solver. This may involve adding new variables and constraints to the optimizer. The transformations are selected from a set of known recipes called bridges.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"A common example of a bridge is one that splits an interval constraint like @constraint(model, 1 <= x + y <= 2) into two constraints, @constraint(model, x + y >= 1) and @constraint(model, x + y <= 2).","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"Use the add_bridges = false keyword to remove the bridging layer:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = Model(HiGHS.Optimizer; add_bridges = false)\nA JuMP Model\n├ solver: HiGHS\n├ objective_sense: FEASIBILITY_SENSE\n├ num_variables: 0\n├ num_constraints: 0\n└ Names registered in the model: none\n\njulia> backend(model)\nMOIU.CachingOptimizer\n├ state: EMPTY_OPTIMIZER\n├ mode: AUTOMATIC\n├ model_cache: MOIU.UniversalFallback{MOIU.Model{Float64}}\n│ ├ ObjectiveSense: FEASIBILITY_SENSE\n│ ├ ObjectiveFunctionType: MOI.ScalarAffineFunction{Float64}\n│ ├ NumberOfVariables: 0\n│ └ NumberOfConstraints: 0\n└ optimizer: A HiGHS model with 0 columns and 0 rows.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"Bridges can be added and removed from a MOI.Bridges.LazyBridgeOptimizer using add_bridge and remove_bridge. Use print_active_bridges to see which bridges are used to reformulate the model. 
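For example, the calls look like this (a hypothetical sketch that is not part of the manual; it assumes HiGHS is installed and uses MOI.Bridges.Constraint.SplitIntervalBridge purely as an illustration):\n\nusing JuMP, HiGHS\nmodel = Model(HiGHS.Optimizer)\nadd_bridge(model, MOI.Bridges.Constraint.SplitIntervalBridge)     # make this bridge available to the model\nremove_bridge(model, MOI.Bridges.Constraint.SplitIntervalBridge)  # remove it from the set of available bridges\nprint_active_bridges(model)   # print the bridges that the current model uses\n\n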
Read the Example: ellipsoid approximation tutorial for more details.","category":"page"},{"location":"manual/models/#Unsafe-backend","page":"Models","title":"Unsafe backend","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"In some advanced use-cases, it is necessary to work with the inner optimization model directly. To access this model, use unsafe_backend:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> backend(model)\nMOIU.CachingOptimizer\n├ state: EMPTY_OPTIMIZER\n├ mode: AUTOMATIC\n├ model_cache: MOIU.UniversalFallback{MOIU.Model{Float64}}\n│ ├ ObjectiveSense: FEASIBILITY_SENSE\n│ ├ ObjectiveFunctionType: MOI.ScalarAffineFunction{Float64}\n│ ├ NumberOfVariables: 0\n│ └ NumberOfConstraints: 0\n└ optimizer: MOIB.LazyBridgeOptimizer{HiGHS.Optimizer}\n ├ Variable bridges: none\n ├ Constraint bridges: none\n ├ Objective bridges: none\n └ model: A HiGHS model with 0 columns and 0 rows.\n\njulia> unsafe_backend(model)\nA HiGHS model with 0 columns and 0 rows.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"warning: Warning\nbackend and unsafe_backend are advanced routines. Read their docstrings to understand the caveats of their usage, and only call them if you wish to access low-level solver-specific functions.","category":"page"},{"location":"manual/models/#Direct-mode","page":"Models","title":"Direct mode","text":"","category":"section"},{"location":"manual/models/","page":"Models","title":"Models","text":"Using a CachingOptimizer results in an additional copy of the model being stored by JuMP in the .model_cache field. To avoid this overhead, create a JuMP model using direct_model:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = direct_model(HiGHS.Optimizer())\nA JuMP Model\n├ mode: DIRECT\n├ solver: HiGHS\n├ objective_sense: FEASIBILITY_SENSE\n├ num_variables: 0\n├ num_constraints: 0\n└ Names registered in the model: none","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"warning: Warning\nSolvers that do not support incremental modification do not support direct_model. An error will be thrown, telling you to use a CachingOptimizer instead.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"The benefit of using direct_model is that there are no extra layers (for example, CachingOptimizer or LazyBridgeOptimizer) between model and the provided optimizer:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> backend(model)\nA HiGHS model with 0 columns and 0 rows.","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"A downside of direct mode is that there is no bridging layer. Therefore, only constraints which are natively supported by the solver are supported. For example, HiGHS.jl does not implement quadratic constraints:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"julia> model = direct_model(HiGHS.Optimizer());\n\njulia> set_silent(model)\n\njulia> @variable(model, x[1:2]);\n\njulia> @constraint(model, x[1]^2 + x[2]^2 <= 2)\nERROR: Constraints of type MathOptInterface.ScalarQuadraticFunction{Float64}-in-MathOptInterface.LessThan{Float64} are not supported by the solver.\n\nIf you expected the solver to support your problem, you may have an error in your formulation. 
Otherwise, consider using a different solver.\n\nThe list of available solvers, along with the problem types they support, is available at https://jump.dev/JuMP.jl/stable/installation/#Supported-solvers.\nStacktrace:","category":"page"},{"location":"manual/models/","page":"Models","title":"Models","text":"warning: Warning\nAnother downside of direct mode is that the behavior of querying solution information after modifying the problem is solver-specific. This can lead to errors, or the solver silently returning an incorrect value. See OptimizeNotCalled errors for more information.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"EditURL = \"https://github.com/jump-dev/MathOptInterface.jl/blob/v1.34.0/docs/src/background/duality.md\"","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"CurrentModule = MathOptInterface\nDocTestSetup = quote\n import MathOptInterface as MOI\nend\nDocTestFilters = [r\"MathOptInterface|MOI\"]","category":"page"},{"location":"moi/background/duality/#Duality","page":"Duality","title":"Duality","text":"","category":"section"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"Conic duality is the starting point for MOI's duality conventions. When all functions are affine (or coordinate projections), and all constraint sets are closed convex cones, the model may be called a conic optimization problem.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"For a minimization problem in geometric conic form, the primal is:","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign\n min_x in mathbbR^n a_0^T x + b_0\n\n textst A_i x + b_i in mathcalC_i i = 1 ldots m\nendalign","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"and the dual is a maximization problem in standard conic form:","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign\n max_y_1 ldots y_m -sum_i=1^m b_i^T y_i + b_0\n\n textst a_0 - sum_i=1^m A_i^T y_i = 0\n\n y_i in mathcalC_i^* i = 1 ldots m\nendalign","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"where each mathcalC_i is a closed convex cone and mathcalC_i^* is its dual cone.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"For a maximization problem in geometric conic form, the primal is:","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign\n max_x in mathbbR^n a_0^T x + b_0\n\n textst A_i x + b_i in mathcalC_i i = 1 ldots m\nendalign","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"and the dual is a minimization problem in standard conic form:","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign\n min_y_1 ldots y_m sum_i=1^m b_i^T y_i + b_0\n\n textst a_0 + sum_i=1^m A_i^T y_i = 0\n\n y_i in mathcalC_i^* i = 1 ldots m\nendalign","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"A linear inequality constraint a^T x + b ge c is equivalent to a^T x + b - c in mathbbR_+, and a^T x + b le c is equivalent to a^T x + b - c in mathbbR_-. 
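As a quick numerical check of this convention (a hypothetical JuMP-level sketch that is not part of the MOI documentation; it assumes the HiGHS solver is installed), the dual variable of a greater-than constraint lies in mathbbR_+ at an optimal solution:\n\nusing JuMP, HiGHS\nmodel = Model(HiGHS.Optimizer)\nset_silent(model)\n@variable(model, x)\n@constraint(model, c_lower, x >= 1)\n@objective(model, Min, x)\noptimize!(model)\ndual(c_lower)   # 1.0 for this problem; it is nonnegative because the constraint maps into R_+\n\n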
Variable-wise constraints are affine constraints with the appropriate identity mapping in place of A_i.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"For the special case of minimization LPs, the MOI primal form can be stated as:","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign\n min_x in mathbbR^n a_0^T x + b_0\n\n textst\nA_1 x ge b_1\n A_2 x le b_2\n A_3 x = b_3\nendalign","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"By applying the stated transformations to conic form, taking the dual, and transforming back into linear inequality form, one obtains the following dual:","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign\n max_y_1y_2y_3 b_1^Ty_1 + b_2^Ty_2 + b_3^Ty_3 + b_0\n\n textst\nA_1^Ty_1 + A_2^Ty_2 + A_3^Ty_3 = a_0\n y_1 ge 0\n y_2 le 0\nendalign","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"For maximization LPs, the MOI primal form can be stated as:","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign\n max_x in mathbbR^n a_0^T x + b_0\n\n textst\nA_1 x ge b_1\n A_2 x le b_2\n A_3 x = b_3\nendalign","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"and similarly, the dual is:","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign\n min_y_1y_2y_3 -b_1^Ty_1 - b_2^Ty_2 - b_3^Ty_3 + b_0\n\n textst\nA_1^Ty_1 + A_2^Ty_2 + A_3^Ty_3 = -a_0\n y_1 ge 0\n y_2 le 0\nendalign","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"warning: Warning\nFor the LP case, the signs of the feasible dual variables depend only on the sense of the corresponding primal inequality and not on the objective sense.","category":"page"},{"location":"moi/background/duality/#Duality-and-scalar-product","page":"Duality","title":"Duality and scalar product","text":"","category":"section"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"The scalar product is different from the canonical one for the sets PositiveSemidefiniteConeTriangle, LogDetConeTriangle, RootDetConeTriangle.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"If the set C_i of the section Duality is one of these three cones, then the rows of the matrix A_i corresponding to off-diagonal entries are twice the value of the coefficients field in the VectorAffineFunction for the corresponding rows. 
See PositiveSemidefiniteConeTriangle for details.","category":"page"},{"location":"moi/background/duality/#Dual-for-problems-with-quadratic-functions","page":"Duality","title":"Dual for problems with quadratic functions","text":"","category":"section"},{"location":"moi/background/duality/#Quadratic-Programs-(QPs)","page":"Duality","title":"Quadratic Programs (QPs)","text":"","category":"section"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"For quadratic programs with only affine conic constraints,","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign*\n min_x in mathbbR^n frac12x^TQ_0x + a_0^T x + b_0\n\n textst A_i x + b_i in mathcalC_i i = 1 ldots m\nendalign*","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"with cones mathcalC_i subseteq mathbbR^m_i for i = 1 ldots m, consider the Lagrangian function","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"L(x y) = frac12x^TQ_0x + a_0^T x + b_0 - sum_i = 1^m y_i^T (A_i x + b_i)","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"Let z(y) denote sum_i = 1^m A_i^T y_i - a_0; then the Lagrangian can be rewritten as","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"L(x y) = frac12x^TQ_0x - z(y)^T x + b_0 - sum_i = 1^m y_i^T b_i","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"The condition nabla_x L(x y) = 0 gives","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"0 = nabla_x L(x y) = Q_0x + a_0 - sum_i = 1^m A_i^T y_i","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"which gives Q_0x = z(y). 
This allows us to obtain that","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"min_x in mathbbR^n L(x y) = -frac12x^TQ_0x + b_0 - sum_i = 1^m y_i^T b_i","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"so the dual problem is","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"max_y_i in mathcalC_i^* min_x in mathbbR^n -frac12x^TQ_0x + b_0 - sum_i = 1^m y_i^T b_i","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"If Q_0 is invertible, we have x = Q_0^-1z(y) hence","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"min_x in mathbbR^n L(x y) = -frac12z(y)^TQ_0^-1z(y) + b_0 - sum_i = 1^m y_i^T b_i","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"so the dual problem is","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"max_y_i in mathcalC_i^* -frac12z(y)^TQ_0^-1z(y) + b_0 - sum_i = 1^m y_i^T b_i","category":"page"},{"location":"moi/background/duality/#Quadratically-Constrained-Quadratic-Programs-(QCQPs)","page":"Duality","title":"Quadratically Constrained Quadratic Programs (QCQPs)","text":"","category":"section"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"Given a problem with both a quadratic objective and quadratic constraints:","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign*\n min_x in mathbbR^n frac12x^TQ_0x + a_0^T x + b_0\n\n textst frac12x^TQ_ix + a_i^T x + b_i in mathcalC_i i = 1 ldots m\nendalign*","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"with cones mathcalC_i subseteq mathbbR for i = 1 ldots m, consider the Lagrangian function","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"L(x y) = frac12x^TQ_0x + a_0^T x + b_0 - sum_i = 1^m y_i (frac12x^TQ_ix + a_i^T x + b_i)","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"A pair of primal-dual variables (x^star y^star) is optimal if","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"x^star is a minimizer of\nmin_x in mathbbR^n L(x y^star)\nThat is,\n0 = nabla_x L(x y^star) = Q_0x + a_0 - sum_i = 1^m y_i^star (Q_ix + a_i)\nand y^star is a maximizer of\nmax_y_i in mathcalC_i^* L(x^star y)\nThat is, for all i = 1 ldots m, frac12x^TQ_ix + a_i^T x + b_i is either zero or in the normal cone of mathcalC_i^* at y^star. For instance, if mathcalC_i is z in mathbbR z le 0 , this means that if frac12x^TQ_ix + a_i^T x + b_i is nonzero at x^star then y_i^star = 0. 
This is the classical complementary slackness condition.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"If mathcalC_i is a vector set, the discussion remains valid with y_i(frac12x^TQ_ix + a_i^T x + b_i) replaced with the scalar product between y_i and the vector of scalar-valued quadratic functions.","category":"page"},{"location":"moi/background/duality/#Dual-for-square-semidefinite-matrices","page":"Duality","title":"Dual for square semidefinite matrices","text":"","category":"section"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"The set PositiveSemidefiniteConeTriangle is self-dual. That is, querying ConstraintDual of a PositiveSemidefiniteConeTriangle constraint returns a vector that is itself a member of PositiveSemidefiniteConeTriangle.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"However, the dual of PositiveSemidefiniteConeSquare is not so straightforward. This section explains the duality convention we use, and how it is derived.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"info: Info\nIf you have a PositiveSemidefiniteConeSquare constraint, the result matrix A from ConstraintDual is not positive semidefinite. However, A + A^top is positive semidefinite.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"Let mathcalS_+ be the cone of symmetric semidefinite matrices in the fracn(n+1)2 dimensional space of symmetric mathbbR^n times n matrices. That is, mathcalS_+ is the set PositiveSemidefiniteConeTriangle. It is well known that mathcalS_+ is a self-dual proper cone.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"Let mathcalP_+ be the cone of symmetric semidefinite matrices in the n^2 dimensional space of mathbbR^n times n matrices. 
That is, mathcalP_+ is the set PositiveSemidefiniteConeSquare.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"In addition, let mathcalD_+ be the cone of matrices A such that A+A^top in mathcalP_+.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"mathcalP_+ is not proper because it is not solid (it is not n^2 dimensional), so it is not necessarily true that mathcalP_+^** = mathcalP_+.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"However, this is the case, because we will show that mathcalP_+^* = mathcalD_+ and mathcalD_+^* = mathcalP_+.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"First, let us see why mathcalP_+^* = mathcalD_+.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"If B is symmetric, then","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"langle AB rangle = langle A^top B^top rangle = langle A^top Brangle","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"so","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"2langle A B rangle = langle A B rangle + langle A^top B rangle = langle A + A^top B rangle","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"Therefore, langle ABrangle ge 0 for all B in mathcalP_+ if and only if langle A+A^topBrangle ge 0 for all B in mathcalP_+. Since A+A^top is symmetric, and we know that mathcalS_+ is self-dual, we have shown that mathcalP_+^* is the set of matrices A such that A+A^top in mathcalP_+.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"Second, let us see why mathcalD_+^* = mathcalP_+.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"Since A in mathcalD_+ implies that A^top in mathcalD_+, B in mathcalD_+^* means that langle A+A^topBrangle ge 0 for all A in mathcalD_+, and hence B in mathcalP_+.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"To see why it should be symmetric, simply notice that if B_ij ne B_ji, then langle ABrangle can be made arbitrarily small by setting A_ij = A_ij + s and A_ji = A_ji - s, with s arbitrarily large, and A stays in mathcalD_+ because A+A^top does not change.","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"Typically, the primal/dual pair for semidefinite programs is presented as:","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign\n min langle C X rangle \ntextst langle A_k Xrangle = b_k forall k \n X in mathcalS_+\nendalign","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"with the dual","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign\n max sum_k b_k y_k \ntextst C - sum A_k y_k in mathcalS_+\nendalign","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"If we allow A_k to be non-symmetric, we should instead use:","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign\n min 
langle C X rangle \ntextst langle A_k Xrangle = b_k forall k \n X in mathcalD_+\nendalign","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"with the dual","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign\n max sum b_k y_k \ntextst C - sum A_k y_k in mathcalP_+\nendalign","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"This is implemented as:","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign\n min langle C Z rangle + langle C - C^top S rangle \ntextst langle A_k Z rangle + langle A_k - A_k^top S rangle = b_k forall k \n Z in mathcalS_+\nendalign","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"with the dual","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"beginalign\n max sum b_k y_k \ntextst C+C^top - sum (A_k+A_k^top) y_k in mathcalS_+ \n C-C^top - sum(A_k-A_k^top) y_k = 0\nendalign","category":"page"},{"location":"moi/background/duality/","page":"Duality","title":"Duality","text":"and we recover Z = X + X^top.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"EditURL = \"tips_and_tricks.jl\"","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#Modeling-with-cones","page":"Modeling with cones","title":"Modeling with cones","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"This tutorial was originally contributed by Arpit Bhatia.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"The purpose of this tutorial is to show how you can model various common problems using conic optimization.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"tip: Tip\nA good resource for learning more about functions which can be modeled using cones is the MOSEK Modeling Cookbook.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#Required-packages","page":"Modeling with cones","title":"Required packages","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"This tutorial uses the following packages:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"using JuMP\nimport LinearAlgebra\nimport MathOptInterface as MOI\nimport SCS\n\nimport Random # hide\nRandom.seed!(1234) # hide\nnothing # hide","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#Background-theory","page":"Modeling with cones","title":"Background theory","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"A subset C of a vector space V is a cone if forall x in C and positive scalars lambda 0, the product lambda x in 
C.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"A cone C is a convex cone if lambda x + (1 - lambda) y in C, for any lambda in 0 1, and any x y in C.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"Conic programming problems are convex optimization problems in which a convex function is minimized over the intersection of an affine subspace and a convex cone. An example of a conic-form minimization problem, in primal form, is:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"beginaligned\n min_x in mathbbR^n a_0^T x + b_0 \n textst A_i x + b_i in mathcalC_i i = 1 ldots m\nendaligned","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"The corresponding dual problem is:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"beginaligned\n max_y_1 ldots y_m -sum_i=1^m b_i^T y_i + b_0 \n textst a_0 - sum_i=1^m A_i^T y_i = 0 \n y_i in mathcalC_i^* i = 1 ldots m\nendaligned","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"where each mathcalC_i is a closed convex cone and mathcalC_i^* is its dual cone.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#Second-Order-Cone","page":"Modeling with cones","title":"Second-Order Cone","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"The SecondOrderCone (or Lorentz Cone) of dimension n is a cone of the form:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"K_soc = (t x) in mathbbR^n t ge x_2 ","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"It is most commonly used to represent the L2-norm of the vector x:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"model = Model(SCS.Optimizer)\nset_silent(model)\n@variable(model, x[1:3])\n@variable(model, t)\n@constraint(model, sum(x) == 1)\n@constraint(model, [t; x] in SecondOrderCone())\n@objective(model, Min, t)\noptimize!(model)\n@assert is_solved_and_feasible(model)\nvalue(t), value.(x)","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#Rotated-Second-Order-Cone","page":"Modeling with cones","title":"Rotated Second-Order Cone","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"A Second-Order Cone rotated by pi4 in the (x_1x_2) plane is called a RotatedSecondOrderCone. 
It is a cone of the form:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"K_rsoc = (tux) in mathbbR^n 2tu ge x_2^2 tu ge 0 ","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"When u = 0.5, it represents the sum of squares of a vector x:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"data = [1.0, 2.0, 3.0, 4.0]\ntarget = [0.45, 1.04, 1.51, 1.97]\nmodel = Model(SCS.Optimizer)\nset_silent(model)\n@variable(model, θ)\n@variable(model, t)\n@expression(model, residuals, θ * data .- target)\n@constraint(model, [t; 0.5; residuals] in RotatedSecondOrderCone())\n@objective(model, Min, t)\noptimize!(model)\n@assert is_solved_and_feasible(model)\nvalue(θ), value(t)","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#Exponential-Cone","page":"Modeling with cones","title":"Exponential Cone","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"The MOI.ExponentialCone is a set of the form:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"K_exp = (xyz) in mathbbR^3 y exp (xy) le z y 0 ","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"It can be used to model problems involving log and exp.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#Exponential","page":"Modeling with cones","title":"Exponential","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"To model exp(x) le z, use (x, 1, z):","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"model = Model(SCS.Optimizer)\nset_silent(model)\n@variable(model, x == 1.5)\n@variable(model, z)\n@objective(model, Min, z)\n@constraint(model, [x, 1, z] in MOI.ExponentialCone())\noptimize!(model)\n@assert is_solved_and_feasible(model)\nvalue(z), exp(1.5)","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#Logarithm","page":"Modeling with cones","title":"Logarithm","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"To model x le log(z), use (x, 1, z):","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"model = Model(SCS.Optimizer)\nset_silent(model)\n@variable(model, x)\n@variable(model, z == 1.5)\n@objective(model, Max, x)\n@constraint(model, [x, 1, z] in MOI.ExponentialCone())\noptimize!(model)\n@assert is_solved_and_feasible(model)\nvalue(x), log(1.5)","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#Log-sum-exp","page":"Modeling with cones","title":"Log-sum-exp","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"To model t ge logleft(sum e^x_iright), use:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"N = 3\nx0 = rand(N)\nmodel = 
Model(SCS.Optimizer)\nset_silent(model)\n@variable(model, x[i = 1:N] == x0[i])\n@variable(model, t)\n@objective(model, Min, t)\n@variable(model, u[1:N])\n@constraint(model, sum(u) <= 1)\n@constraint(model, [i = 1:N], [x[i] - t, 1, u[i]] in MOI.ExponentialCone())\noptimize!(model)\nvalue(t), log(sum(exp.(x0)))","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#Entropy","page":"Modeling with cones","title":"Entropy","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"The entropy maximization problem consists of maximizing the entropy function, H(x) = -xlogx subject to linear inequality constraints.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"beginaligned\n max - sum_i=1^n x_i log x_i \n textst mathbf1^top x = 1 \n Ax leq b\nendaligned","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"We can model this problem using an exponential cone by using the following transformation:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"tleq -xlogx iff tleq xlog(1x) iff (t x 1) in K_exp","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"Thus, our problem becomes,","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"beginaligned\n max 1^Tt \n textst Ax leq b \n 1^T x = 1 \n (t_i x_i 1) in K_exp forall i = 1 ldots n \nendaligned","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"m, n = 10, 15\nA, b = randn(m, n), rand(m, 1)\nmodel = Model(SCS.Optimizer)\nset_silent(model)\n@variable(model, t[1:n])\n@variable(model, x[1:n])\n@objective(model, Max, sum(t))\n@constraint(model, sum(x) == 1)\n@constraint(model, A * x .<= b)\n@constraint(model, [i = 1:n], [t[i], x[i], 1] in MOI.ExponentialCone())\noptimize!(model)\n@assert is_solved_and_feasible(model)\nobjective_value(model)","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"The MOI.ExponentialCone has a dual, the MOI.DualExponentialCone, that offers an alternative formulation that can be more efficient for some formulations.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"There is also the MOI.RelativeEntropyCone for explicitly encoding the relative entropy function","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"model = Model(SCS.Optimizer)\nset_silent(model)\n@variable(model, t)\n@variable(model, x[1:n])\n@objective(model, Max, -t)\n@constraint(model, sum(x) == 1)\n@constraint(model, A * x .<= b)\n@constraint(model, [t; ones(n); x] in MOI.RelativeEntropyCone(2n + 1))\noptimize!(model)\n@assert is_solved_and_feasible(model)\nobjective_value(model)","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#PowerCone","page":"Modeling with cones","title":"PowerCone","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with 
cones","text":"The MOI.PowerCone is a three-dimensional set parameterized by a scalar value α. It has the form:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"K_p = (xyz) in mathbbR^3 x^alpha y^1-alpha ge z x ge 0 y ge 0 ","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"The power cone permits a number of reformulations. For example, when p 1, we can model t ge x^p using the power cone (t 1 x) with alpha = 1 p. Thus, to model t ge x^3 with x ge 0","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"model = Model(SCS.Optimizer)\nset_silent(model)\n@variable(model, t)\n@variable(model, x >= 1.5)\n@constraint(model, [t, 1, x] in MOI.PowerCone(1 / 3))\n@objective(model, Min, t)\noptimize!(model)\n@assert is_solved_and_feasible(model)\nvalue(t), value(x)","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"The MOI.PowerCone has a dual, the MOI.DualPowerCone, that offers an alternative formulation that can be more efficient for some formulations.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#P-Norm","page":"Modeling with cones","title":"P-Norm","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"The p-norm x_p = left(sumlimits_i x_i^pright)^frac1p can be modeled using MOI.PowerCones. See the Mosek Modeling Cookbook for the derivation.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"function p_norm(x::Vector, p)\n N = length(x)\n model = Model(SCS.Optimizer)\n set_silent(model)\n @variable(model, r[1:N])\n @variable(model, t)\n @constraint(model, [i = 1:N], [r[i], t, x[i]] in MOI.PowerCone(1 / p))\n @constraint(model, sum(r) == t)\n @objective(model, Min, t)\n optimize!(model)\n @assert is_solved_and_feasible(model)\n return value(t)\nend\n\nx = rand(5);\nLinearAlgebra.norm(x, 4), p_norm(x, 4)","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#Positive-Semidefinite-Cone","page":"Modeling with cones","title":"Positive Semidefinite Cone","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"The set of positive semidefinite matrices (PSD) of dimension n form a cone in mathbbR^n. We write this set mathematically as:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"mathcalS_+^n = X in mathcalS^n mid z^T X z geq 0 forall zin mathbbR^n ","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"A PSD cone is represented in JuMP using the MOI sets PositiveSemidefiniteConeTriangle (for upper triangle of a PSD matrix) and PositiveSemidefiniteConeSquare (for a complete PSD matrix). 
However, it is preferable to use the PSDCone shortcut as illustrated below.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#Example:-largest-eigenvalue-of-a-symmetric-matrix","page":"Modeling with cones","title":"Example: largest eigenvalue of a symmetric matrix","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"Suppose A has eigenvalues lambda_1 geq lambda_2 ldots geq lambda_n. Then the matrix t I-A has eigenvalues t-lambda_1 t-lambda_2 ldots t-lambda_n. Note that t I-A is PSD exactly when all these eigenvalues are non-negative, and this happens for values t geq lambda_1. Thus, we can model the problem of finding the largest eigenvalue of a symmetric matrix as:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"beginaligned\nlambda_1 = min t \ntext st t I-A succeq 0\nendaligned","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"A = [3 2 4; 2 0 2; 4 2 3]\nI = Matrix{Float64}(LinearAlgebra.I, 3, 3)\nmodel = Model(SCS.Optimizer)\nset_silent(model)\n@variable(model, t)\n@objective(model, Min, t)\n@constraint(model, t .* I - A in PSDCone())\noptimize!(model)\n@assert is_solved_and_feasible(model)\nobjective_value(model)","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#GeometricMeanCone","page":"Modeling with cones","title":"GeometricMeanCone","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"The MOI.GeometricMeanCone is a cone of the form:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"K_geo = (t x) in mathbbR^n x ge 0 t le sqrtn-1x_1 x_2 cdots x_n-1 ","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"model = Model(SCS.Optimizer)\nset_silent(model)\n@variable(model, x[1:4])\n@variable(model, t)\n@constraint(model, sum(x) == 1)\n@constraint(model, [t; x] in MOI.GeometricMeanCone(5))\noptimize!(model)\nvalue(t), value.(x)","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#RootDetCone","page":"Modeling with cones","title":"RootDetCone","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"The MOI.RootDetConeSquare is a cone of the form:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"K = (t X) in mathbbR^1+d^2 t le det(X)^frac1d ","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"model = Model(SCS.Optimizer)\nset_silent(model)\n@variable(model, t)\n@variable(model, X[1:2, 1:2])\n@objective(model, Max, t)\n@constraint(model, [t; vec(X)] in MOI.RootDetConeSquare(2))\n@constraint(model, X .== [2 1; 1 3])\noptimize!(model)\n@assert is_solved_and_feasible(model)\nvalue(t), sqrt(LinearAlgebra.det(value.(X)))","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"If X is symmetric, then you can use MOI.RootDetConeTriangle instead. 
This can be more efficient because the solver does not need to add additional constraints to ensure X is symmetric.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"When forming the function, use triangle_vec to obtain the column-wise upper triangle of the matrix as a vector in the order that JuMP requires.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"model = Model(SCS.Optimizer)\nset_silent(model)\n@variable(model, t)\n@variable(model, X[1:2, 1:2], Symmetric)\n@objective(model, Max, t)\n@constraint(model, [t; triangle_vec(X)] in MOI.RootDetConeTriangle(2))\n@constraint(model, X .== [2 1; 1 3])\noptimize!(model)\nvalue(t), sqrt(LinearAlgebra.det(value.(X)))","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#LogDetCone","page":"Modeling with cones","title":"LogDetCone","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"The MOI.LogDetConeSquare is a cone of the form:","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"K = (t u X) in mathbbR^2+d^2 t le u log(det(X u)) ","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"model = Model(SCS.Optimizer)\nset_silent(model)\n@variable(model, t)\n@variable(model, u)\n@variable(model, X[1:2, 1:2])\n@objective(model, Max, t)\n@constraint(model, [t; u; vec(X)] in MOI.LogDetConeSquare(2))\n@constraint(model, X .== [2 1; 1 3])\n@constraint(model, u == 0.5)\noptimize!(model)\n@assert is_solved_and_feasible(model)\nvalue(t), 0.5 * log(LinearAlgebra.det(value.(X) ./ 0.5))","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"If X is symmetric, then you can use MOI.LogDetConeTriangle instead. 
This can be more efficient because the solver does not need to add additional constraints to ensure X is symmetric.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"When forming the function, use triangle_vec to obtain the column-wise upper triangle of the matrix as a vector in the order that JuMP requires.","category":"page"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"model = Model(SCS.Optimizer)\nset_silent(model)\n@variable(model, t)\n@variable(model, u)\n@variable(model, X[1:2, 1:2], Symmetric)\n@objective(model, Max, t)\n@constraint(model, [t; u; triangle_vec(X)] in MOI.LogDetConeTriangle(2))\n@constraint(model, X .== [2 1; 1 3])\n@constraint(model, u == 0.5)\noptimize!(model)\n@assert is_solved_and_feasible(model)\nvalue(t), 0.5 * log(LinearAlgebra.det(value.(X) ./ 0.5))","category":"page"},{"location":"tutorials/conic/tips_and_tricks/#Other-Cones-and-Functions","page":"Modeling with cones","title":"Other Cones and Functions","text":"","category":"section"},{"location":"tutorials/conic/tips_and_tricks/","page":"Modeling with cones","title":"Modeling with cones","text":"For other cones supported by JuMP, check out the MathOptInterface Manual.","category":"page"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"EditURL = \"https://github.com/jump-dev/BARON.jl/blob/v0.8.5/README.md\"","category":"page"},{"location":"packages/BARON/#BARON.jl","page":"jump-dev/BARON.jl","title":"BARON.jl","text":"","category":"section"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"(Image: Build Status) (Image: codecov)","category":"page"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"BARON.jl is a wrapper for BARON by The Optimization Firm.","category":"page"},{"location":"packages/BARON/#Affiliation","page":"jump-dev/BARON.jl","title":"Affiliation","text":"","category":"section"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"This wrapper is maintained by the JuMP community and is not officially supported by The Optimization Firm.","category":"page"},{"location":"packages/BARON/#Getting-help","page":"jump-dev/BARON.jl","title":"Getting help","text":"","category":"section"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"If you need help, please ask a question on the JuMP community forum.","category":"page"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"If you have a reproducible example of a bug, please open a GitHub issue.","category":"page"},{"location":"packages/BARON/#License","page":"jump-dev/BARON.jl","title":"License","text":"","category":"section"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"BARON.jl is licensed under the MIT License.","category":"page"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"The underlying solver is a closed-source commercial product for which you must obtain a license from The Optimization Firm, although a small trial version is available for 
free.","category":"page"},{"location":"packages/BARON/#Installation","page":"jump-dev/BARON.jl","title":"Installation","text":"","category":"section"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"First, download a copy of the BARON solver and unpack the executable in a location of your choosing.","category":"page"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"Once installed, set the BARON_EXEC environment variable pointing to the BARON executable (full path, including file name as it differs across platforms), and run Pkg.add(\"BARON\"). For example:","category":"page"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"ENV[\"BARON_EXEC\"] = \"/path/to/baron.exe\"\nusing Pkg\nPkg.add(\"BARON\")","category":"page"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"The baronlice.txt license file should be placed in the same directory as the BARON executable, or in your current working directory.","category":"page"},{"location":"packages/BARON/#Use-with-JuMP","page":"jump-dev/BARON.jl","title":"Use with JuMP","text":"","category":"section"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"using JuMP, BARON\nmodel = Model(BARON.Optimizer)","category":"page"},{"location":"packages/BARON/#MathOptInterface-API","page":"jump-dev/BARON.jl","title":"MathOptInterface API","text":"","category":"section"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"The BARON optimizer supports the following constraints and attributes.","category":"page"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"List of supported objective functions:","category":"page"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}\nMOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}\nMOI.ObjectiveFunction{MOI.ScalarNonlinearFunction}","category":"page"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"List of supported variable types:","category":"page"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"MOI.Reals","category":"page"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"List of supported constraint types:","category":"page"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"MOI.ScalarAffineFunction{Float64} in MOI.EqualTo{Float64}\nMOI.ScalarAffineFunction{Float64} in MOI.GreaterThan{Float64}\nMOI.ScalarAffineFunction{Float64} in MOI.Interval{Float64}\nMOI.ScalarAffineFunction{Float64} in MOI.LessThan{Float64}\nMOI.ScalarQuadraticFunction{Float64} in MOI.EqualTo{Float64}\nMOI.ScalarQuadraticFunction{Float64} in MOI.GreaterThan{Float64}\nMOI.ScalarQuadraticFunction{Float64} in MOI.Interval{Float64}\nMOI.ScalarQuadraticFunction{Float64} in MOI.LessThan{Float64}\nMOI.ScalarNonlinearFunction in MOI.EqualTo{Float64}\nMOI.ScalarNonlinearFunction in MOI.GreaterThan{Float64}\nMOI.ScalarNonlinearFunction in MOI.Interval{Float64}\nMOI.ScalarNonlinearFunction in MOI.LessThan{Float64}\nMOI.VariableIndex in MOI.EqualTo{Float64}\nMOI.VariableIndex in MOI.GreaterThan{Float64}\nMOI.VariableIndex in MOI.Integer\nMOI.VariableIndex in 
MOI.Interval{Float64}\nMOI.VariableIndex in MOI.LessThan{Float64}\nMOI.VariableIndex in MOI.ZeroOne","category":"page"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"List of supported model attributes:","category":"page"},{"location":"packages/BARON/","page":"jump-dev/BARON.jl","title":"jump-dev/BARON.jl","text":"MOI.NLPBlock()\nMOI.ObjectiveSense()","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"EditURL = \"getting_started_with_julia.jl\"","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Getting-started-with-Julia","page":"Getting started with Julia","title":"Getting started with Julia","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Because JuMP is embedded in Julia, knowing some basic Julia is important before you start learning JuMP.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"tip: Tip\nThis tutorial is designed to provide a minimalist crash course in the basics of Julia. You can find resources that provide a more comprehensive introduction to Julia here.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Installing-Julia","page":"Getting started with Julia","title":"Installing Julia","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"To install Julia, download the latest stable release, then follow the platform specific install instructions.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"tip: Tip\nUnless you know otherwise, you probably want the 64-bit version.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Next, you need an IDE to develop in. VS Code is a popular choice, so follow these install instructions.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Julia can also be used with Jupyter notebooks or the reactive notebooks of Pluto.jl.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#The-Julia-REPL","page":"Getting started with Julia","title":"The Julia REPL","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"The main way of interacting with Julia is via its REPL (Read Evaluate Print Loop). 
To access the REPL, start the Julia executable to arrive at the julia> prompt, and then start coding:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"1 + 1","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"As your programs become larger, write a script as a text file, and then run that file using:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"julia> include(\"path/to/file.jl\")","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"warning: Warning\nBecause of Julia's startup latency, running scripts from the command line like the following is slow:$ julia path/to/file.jlUse the REPL or a notebook instead, and read The \"time-to-first-solve\" issue for more information.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Code-blocks-in-this-documentation","page":"Getting started with Julia","title":"Code blocks in this documentation","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"In this documentation you'll see a mix of code examples with and without the julia>.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"The Julia prompt is mostly used to demonstrate short code snippets, and the output is exactly what you will see if run from the REPL.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Blocks without the julia> can be copy-pasted into the REPL, but they are used because they enable richer output like plots or LaTeX to be displayed in the online and PDF versions of the documentation. If you run them from the REPL you may see different output.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Where-to-get-help","page":"Getting started with Julia","title":"Where to get help","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Read the documentation\nJuMP https://jump.dev/JuMP.jl/stable/\nJulia https://docs.julialang.org/en/v1/\nAsk (or browse) the Julia community forum: https://discourse.julialang.org\nIf the question is JuMP-related, ask in the Optimization (Mathematical) section, or tag your question with \"jump\"","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"To access the built-in help at the REPL, type ? 
to enter help-mode, followed by the name of the function to lookup:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"help?> print\nsearch: print println printstyled sprint isprint prevind parentindices precision escape_string\n\n print([io::IO], xs...)\n\n Write to io (or to the default output stream stdout if io is not given) a canonical\n (un-decorated) text representation. The representation used by print includes minimal formatting\n and tries to avoid Julia-specific details.\n\n print falls back to calling show, so most types should just define show. Define print if your\n type has a separate \"plain\" representation. For example, show displays strings with quotes, and\n print displays strings without quotes.\n\n string returns the output of print as a string.\n\n Examples\n ≡≡≡≡≡≡≡≡≡≡\n\n julia> print(\"Hello World!\")\n Hello World!\n julia> io = IOBuffer();\n\n julia> print(io, \"Hello\", ' ', :World!)\n\n julia> String(take!(io))\n \"Hello World!\"","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Numbers-and-arithmetic","page":"Getting started with Julia","title":"Numbers and arithmetic","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Since we want to solve optimization problems, we're going to be using a lot of math. Luckily, Julia is great for math, with all the usual operators:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"1 + 1\n1 - 2\n2 * 2\n4 / 5\n3^2","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Did you notice how Julia didn't print .0 after some of the numbers? Julia is a dynamic language, which means you never have to explicitly declare the type of a variable. However, in the background, Julia is giving each variable a type. 
Check the type of something using the typeof function:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"typeof(1)\ntypeof(1.0)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Here 1 is an Int64, which is an integer with 64 bits of precision, and 1.0 is a Float64, which is a floating point number with 64-bits of precision.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"tip: Tip\nIf you aren't familiar with floating point numbers, make sure to read the Floating point numbers section.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"We create complex numbers using im:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"x = 2 + 1im\nreal(x)\nimag(x)\ntypeof(x)\nx * (1 - 2im)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"info: Info\nThe curly brackets surround what we call the parameters of a type. You can read Complex{Int64} as \"a complex number, where the real and imaginary parts are represented by Int64.\" If we call typeof(1.0 + 2.0im) it will be Complex{Float64}, which a complex number with the parts represented by Float64.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"There are also some cool things like an irrational representation of π.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"π","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"tip: Tip\nTo make π (and most other Greek letters), type \\pi and then press [TAB].","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"typeof(π)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"However, if we do math with irrational numbers, they get converted to Float64:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"typeof(2π / 3)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Floating-point-numbers","page":"Getting started with Julia","title":"Floating point numbers","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"warning: Warning\nIf you aren't familiar with floating point numbers, make sure to read this section 
carefully.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"A Float64 is a floating point approximation of a real number using 64-bits of information.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Because it is an approximation, things we know hold true in mathematics don't hold true in a computer. For example:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"0.1 * 3 == 0.3","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"A more complicated example is:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"sin(2π / 3) == √3 / 2","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"tip: Tip\nGet √ by typing \\sqrt then press [TAB].","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Let's see what the differences are:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"0.1 * 3 - 0.3\nsin(2π / 3) - √3 / 2","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"They are small, but not zero.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"One way of explaining this difference is to consider how we would write 1 / 3 and 2 / 3 using only four digits after the decimal point. We would write 1 / 3 as 0.3333, and 2 / 3 as 0.6667. So, despite the fact that 2 * (1 / 3) == 2 / 3, 2 * 0.3333 == 0.6666 != 0.6667.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Let's try that again using ≈ (\\approx + [TAB]) instead of ==:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"0.1 * 3 ≈ 0.3\nsin(2π / 3) ≈ √3 / 2","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"≈ is a clever way of calling the isapprox function:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"isapprox(sin(2π / 3), √3 / 2; atol = 1e-8)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"warning: Warning\nFloating point is the reason solvers use tolerances when they solve optimization models. 
A common mistake you're likely to make is checking whether a binary variable is 0 using value(z) == 0. Always remember to use something like isapprox when comparing floating point numbers.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Note that isapprox will always return false if one of the number being compared is 0 and atol is zero (its default value).","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"1e-300 ≈ 0.0","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"so always set a nonzero value of atol if one of the arguments can be zero.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"isapprox(1e-9, 0.0; atol = 1e-8)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"tip: Tip\nGurobi has a good series of articles on the implications of floating point in optimization if you want to read more.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"If you aren't careful, floating point arithmetic can throw up all manner of issues. For example:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"1 + 1e-16 == 1","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"It even turns out that floating point numbers aren't associative:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"(1 + 1e-16) - 1e-16 == 1 + (1e-16 - 1e-16)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"It's important to note that this issue isn't Julia-specific. It happens in every programming language (try it out in Python).","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Vectors,-matrices,-and-arrays","page":"Getting started with Julia","title":"Vectors, matrices, and arrays","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Similar to MATLAB, Julia has native support for vectors, matrices and tensors; all of which are represented by arrays of different dimensions. 
Vectors are constructed by comma-separated elements surrounded by square brackets:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"b = [5, 6]","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Matrices can be constructed with spaces separating the columns, and semicolons separating the rows:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"A = [1.0 2.0; 3.0 4.0]","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"We can do linear algebra:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"x = A \\ b","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"info: Info\nHere is floating point at work again; x is approximately [-4, 4.5].","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"A * x\nA * x ≈ b","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Note that when multiplying vectors and matrices, dimensions matter. For example, you can't multiply a vector by a vector:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"try #hide\n b * b\ncatch err #hide\n showerror(stderr, err) #hide\nend #hide","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"But multiplying transposes works:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"b' * b\nb * b'","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Other-common-types","page":"Getting started with Julia","title":"Other common types","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/#Comments","page":"Getting started with Julia","title":"Comments","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Although not technically a type, code comments begin with the # character:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"1 + 1 # This is a comment","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Multiline comments begin with #= and end with 
=#:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"#=\nHere is a\nmultiline comment\n=#","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Comments can even be nested inside expressions. This is sometimes helpful when documenting inputs to functions:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"isapprox(\n sin(π),\n 0.0;\n #= We need an explicit atol here because we are comparing with 0 =#\n atol = 0.001,\n)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Strings","page":"Getting started with Julia","title":"Strings","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Double quotes are used for strings:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"typeof(\"This is Julia\")","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Unicode is fine in strings:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"typeof(\"π is about 3.1415\")","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Use println to print a string:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"println(\"Hello, World!\")","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Use $() to interpolate values into a string:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"x = 123\nprintln(\"The value of x is: $(x)\")","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Use triple-quotes for multiline strings:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"s = \"\"\"\nHere is\na\nmultiline string\n\"\"\"\n\nprintln(s)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Symbols","page":"Getting started with Julia","title":"Symbols","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Julia Symbols are a data structure from the compiler that represent Julia identifiers (that is, variable names).","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting 
started with Julia","title":"Getting started with Julia","text":"println(\"The value of x is: $(eval(:x))\")","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"warning: Warning\nWe used eval here to demonstrate how Julia links Symbols to variables. However, avoid calling eval in your code. It is usually a sign that your code is doing something that could be more easily achieved a different way. The Community Forum is a good place to ask for advice on alternative approaches.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"typeof(:x)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"You can think of a Symbol as a String that takes up less memory, and that can't be modified.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Convert between String and Symbol using their constructors:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"String(:abc)\nSymbol(\"abc\")","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"tip: Tip\nSymbols are often (ab)used to stand in for a String or an Enum, when one of the latter is likely a better choice. The JuMP Style guide recommends reserving Symbols for identifiers. See @enum vs. Symbol for more.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Tuples","page":"Getting started with Julia","title":"Tuples","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Julia makes extensive use of a simple data structure called Tuples. Tuples are immutable collections of values. 
For example:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"t = (\"hello\", 1.2, :foo)\ntypeof(t)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Tuples can be accessed by index, similar to arrays:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"t[2]","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"And they can be \"unpacked\" like so:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"a, b, c = t\nb","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"The values can also be given names, which is a convenient way of making light-weight data structures.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"t = (word = \"hello\", num = 1.2, sym = :foo)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Values can be accessed using dot syntax:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"t.word","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Dictionaries","page":"Getting started with Julia","title":"Dictionaries","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Similar to Python, Julia has native support for dictionaries. Dictionaries provide a very generic way of mapping keys to values. 
For example, a map of integers to strings:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"d1 = Dict(1 => \"A\", 2 => \"B\", 4 => \"D\")","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"info: Info\nType-stuff again: Dict{Int64,String} is a dictionary with Int64 keys and String values.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Looking up a value uses the bracket syntax:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"d1[2]","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Dictionaries support non-integer keys and can mix data types:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Dict(\"A\" => 1, \"B\" => 2.5, \"D\" => 2 - 3im)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"info: Info\nJulia types form a hierarchy. Here the value type of the dictionary is Number, which is a generalization of Int64, Float64, and Complex{Int}. Leaf nodes in this hierarchy are called \"concrete\" types, and all others are called \"Abstract.\" In general, having variables with abstract types like Number can lead to slower code, so you should try to make sure every element in a dictionary or vector is the same type. 
For example, in this case we could represent every element as a Complex{Float64}:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Dict(\"A\" => 1.0 + 0.0im, \"B\" => 2.5 + 0.0im, \"D\" => 2.0 - 3.0im)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Dictionaries can be nested:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"d2 = Dict(\"A\" => 1, \"B\" => 2, \"D\" => Dict(:foo => 3, :bar => 4))\nd2[\"B\"]\nd2[\"D\"][:foo]","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Structs","page":"Getting started with Julia","title":"Structs","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"You can define custom datastructures with struct:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"struct MyStruct\n x::Int\n y::String\n z::Dict{Int,Int}\nend\n\na = MyStruct(1, \"a\", Dict(2 => 3))\na.x","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"By default, these are not mutable","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"try #hide\n a.x = 2\ncatch err #hide\n showerror(stderr, err) #hide\nend #hide","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"However, you can declare a mutable struct which is mutable:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"mutable struct MyStructMutable\n x::Int\n y::String\n z::Dict{Int,Int}\nend\n\na = MyStructMutable(1, \"a\", Dict(2 => 3))\na.x\na.x = 2\na","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Loops","page":"Getting started with Julia","title":"Loops","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Julia has native support for for-each style loops with the syntax for in end:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"for i in 1:5\n println(i)\nend","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"info: Info\nRanges are constructed as start:stop, or start:step:stop.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"for i in 1.2:1.1:5.6\n 
println(i)\nend","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"This for-each loop also works with dictionaries:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"for (key, value) in Dict(\"A\" => 1, \"B\" => 2.5, \"D\" => 2 - 3im)\n println(\"$(key): $(value)\")\nend","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Note that in contrast to vector languages like MATLAB and R, loops do not result in a significant performance degradation in Julia.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Control-flow","page":"Getting started with Julia","title":"Control flow","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Julia control flow is similar to MATLAB, using the keywords if-elseif-else-end, and the logical operators || and && for or and and respectively:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"for i in 0:5:15\n if i < 5\n println(\"$(i) is less than 5\")\n elseif i < 10\n println(\"$(i) is less than 10\")\n else\n if i == 10\n println(\"the value is 10\")\n else\n println(\"$(i) is bigger than 10\")\n end\n end\nend","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Comprehensions","page":"Getting started with Julia","title":"Comprehensions","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Similar to languages like Haskell and Python, Julia supports the use of simple loops in the construction of arrays and dictionaries, called comprehensions.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"A list of increasing integers:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"[i for i in 1:5]","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Matrices can be built by including multiple indices:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"[i * j for i in 1:5, j in 5:10]","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Conditional statements can be used to filter out some values:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"[i for i in 1:10 if i % 2 == 1]","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with 
Julia","title":"Getting started with Julia","text":"A similar syntax can be used for building dictionaries:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Dict(\"$(i)\" => i for i in 1:10 if i % 2 == 1)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Functions","page":"Getting started with Julia","title":"Functions","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"A simple function is defined as follows:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"function print_hello()\n return println(\"hello\")\nend\nprint_hello()","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Arguments can be added to a function:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"function print_it(x)\n return println(x)\nend\nprint_it(\"hello\")\nprint_it(1.234)\nprint_it(:my_id)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Optional keyword arguments are also possible:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"function print_it(x; prefix = \"value:\")\n return println(\"$(prefix) $(x)\")\nend\nprint_it(1.234)\nprint_it(1.234; prefix = \"val:\")","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"The keyword return is used to specify the return values of a function:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"function mult(x; y = 2.0)\n return x * y\nend\n\nmult(4.0)\nmult(4.0; y = 5.0)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Anonymous-functions","page":"Getting started with Julia","title":"Anonymous functions","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"The syntax input -> output creates an anonymous function. These are most useful when passed to other functions. 
For example:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"f = x -> x^2\nf(2)\nmap(x -> x^2, 1:4)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Type-parameters","page":"Getting started with Julia","title":"Type parameters","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"We can constrain the inputs to a function using type parameters, which are :: followed by the type of the input we want. For example:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"function foo(x::Int)\n return x^2\nend\n\nfunction foo(x::Float64)\n return exp(x)\nend\n\nfunction foo(x::Number)\n return x + 1\nend\n\nfoo(2)\nfoo(2.0)\nfoo(1 + 1im)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"But what happens if we call foo with something we haven't defined it for?","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"try #hide\n foo([1, 2, 3])\ncatch err #hide\n showerror(stderr, err) #hide\nend #hide","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"A MethodError means that you passed a function something that didn't match the type that it was expecting. In this case, the error message says that it doesn't know how to handle an Vector{Int64}, but it does know how to handle Float64, Int64, and Number.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"tip: Tip\nRead the \"Closest candidates\" part of the error message carefully to get a hint as to what was expected.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Broadcasting","page":"Getting started with Julia","title":"Broadcasting","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"In the example above, we didn't define what to do if f was passed a Vector. Luckily, Julia provides a convenient syntax for mapping f element-wise over arrays. Just add a . between the name of the function and the opening (. This works for any function, including functions with multiple arguments. For example:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"foo.([1, 2, 3])","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"tip: Tip\nGet a MethodError when calling a function that takes a Vector, Matrix, or Array? 
Try broadcasting.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Mutable-vs-immutable-objects","page":"Getting started with Julia","title":"Mutable vs immutable objects","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Some types in Julia are mutable, which means you can change the values inside them. A good example is an array. You can modify the contents of an array without having to make a new array.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"In contrast, types like Float64 are immutable. You cannot modify the contents of a Float64.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"This is something to be aware of when passing types into functions. For example:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"function mutability_example(mutable_type::Vector{Int}, immutable_type::Int)\n mutable_type[1] += 1\n immutable_type += 1\n return\nend\n\nmutable_type = [1, 2, 3]\nimmutable_type = 1\n\nmutability_example(mutable_type, immutable_type)\n\nprintln(\"mutable_type: $(mutable_type)\")\nprintln(\"immutable_type: $(immutable_type)\")","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Because Vector{Int} is a mutable type, modifying the variable inside the function changed the value outside of the function. In contrast, the change to immutable_type didn't modify the value outside the function.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"You can check mutability with the isimmutable function:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"isimmutable([1, 2, 3])\nisimmutable(1)","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#The-package-manager","page":"Getting started with Julia","title":"The package manager","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/#Installing-packages","page":"Getting started with Julia","title":"Installing packages","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"No matter how wonderful Julia's base language is, at some point you will want to use an extension package. Some of these are built-in, for example random number generation is available in the Random package in the standard library. 
These packages are loaded with the commands using and import.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"using Random # The equivalent of Python's `from Random import *`\nimport Random # The equivalent of Python's `import Random`\n\nRandom.seed!(33)\n\n[rand() for i in 1:10]","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"The Package Manager is used to install packages that are not part of Julia's standard library.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"For example, the following can be used to install JuMP:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"using Pkg\nPkg.add(\"JuMP\")","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"For a complete list of registered Julia packages, see the package listing at JuliaHub.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"From time to time, you may wish to use a Julia package that is not registered. In this case, a git repository URL can be used to install the package.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"using Pkg\nPkg.add(url = \"https://github.com/user-name/MyPackage.jl.git\")","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/#Package-environments","page":"Getting started with Julia","title":"Package environments","text":"","category":"section"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"By default, Pkg.add will add packages to Julia's global environment. However, Julia also has built-in support for virtual environments.","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"Activate a virtual environment with:","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"import Pkg; Pkg.activate(\"/path/to/environment\")","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"You can see what packages are installed in the current environment with Pkg.status().","category":"page"},{"location":"tutorials/getting_started/getting_started_with_julia/","page":"Getting started with Julia","title":"Getting started with Julia","text":"tip: Tip\nWe strongly recommend you create a Pkg environment for each project that you create in Julia, and add only the packages that you need, instead of adding lots of packages to the global environment. 
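For example, a typical per-project workflow looks like the following. This is a minimal sketch; the directory name my_project is a placeholder, and HiGHS stands in for whichever solver your project needs.

```julia
import Pkg

Pkg.activate("my_project")   # activate ./my_project; its Project.toml is created
                             # the first time you add a package
Pkg.add(["JuMP", "HiGHS"])   # recorded only in this environment
Pkg.status()                 # list the packages in the active environment
```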
The Pkg manager documentation has more information on this topic.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"EditURL = \"tips_and_tricks.jl\"","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#linear_tips_and_tricks","page":"Tips and tricks","title":"Tips and tricks","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"This tutorial was originally contributed by Arpit Bhatia.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"tip: Tip\nA good source of tips is the Mosek Modeling Cookbook.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"This tutorial collates some tips and tricks you can use when formulating mixed-integer programs. It uses the following packages:","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"using JuMP","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Absolute-value","page":"Tips and tricks","title":"Absolute value","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"To model the absolute value function t ge x, there are a few options. In all cases, these reformulations only work if you are minimizing t \"down\" into x. 
They do not work if you are trying to maximize x.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Option-1","page":"Tips and tricks","title":"Option 1","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"This option adds two linear inequality constraints:","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x)\n@variable(model, t)\n@constraint(model, t >= x)\n@constraint(model, t >= -x)","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Option-2","page":"Tips and tricks","title":"Option 2","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"This option uses two non-negative variables and forms expressions for x and t:","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, z[1:2] >= 0)\n@expression(model, t, z[1] + z[2])\n@expression(model, x, z[1] - z[2])","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Option-3","page":"Tips and tricks","title":"Option 3","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"This option uses MOI.NormOneCone and lets JuMP choose the reformulation:","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x)\n@variable(model, t)\n@constraint(model, [t; x] in MOI.NormOneCone(2))","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#L1-norm","page":"Tips and tricks","title":"L1-norm","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"To model min x_1, that is, min sumlimits_i x_i, use the MOI.NormOneCone:","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x[1:3])\n@variable(model, t)\n@constraint(model, [t; x] in MOI.NormOneCone(1 + length(x)))\n@objective(model, Min, t)","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Infinity-norm","page":"Tips and tricks","title":"Infinity-norm","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"To model min x_infty, that is, min maxlimits_i x_i, use the MOI.NormInfinityCone:","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x[1:3])\n@variable(model, t)\n@constraint(model, [t; x] in MOI.NormInfinityCone(1 + length(x)))\n@objective(model, Min, t)","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Max","page":"Tips and tricks","title":"Max","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"To model t ge maxx y, do:","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, t)\n@variable(model, x)\n@variable(model, y)\n@constraint(model, t >= x)\n@constraint(model, t >= 
y)","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"This reformulation does not work for t ge minx y.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Min","page":"Tips and tricks","title":"Min","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"To model t le minx y, do:","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, t)\n@variable(model, x)\n@variable(model, y)\n@constraint(model, t <= x)\n@constraint(model, t <= y)","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"This reformulation does not work for t le maxx y.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Modulo","page":"Tips and tricks","title":"Modulo","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"To model y = x text mod n, where n is a constant modulus, we use the relationship x = n cdot z + y, where z in mathbbZ_+ is the number of times that n can be divided by x and y is the remainder.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"n = 4\nmodel = Model();\n@variable(model, x >= 0, Int)\n@variable(model, 0 <= y <= n - 1, Int)\n@variable(model, z >= 0, Int)\n@constraint(model, x == n * z + y)","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"The modulo reformulation is often useful for subdividing a time increment into units of time like hours and days:","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, t >= 0, Int)\n@variable(model, 0 <= hours <= 23, Int)\n@variable(model, days >= 0, Int)\n@constraint(model, t == 24 * days + hours)","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Boolean-operators","page":"Tips and tricks","title":"Boolean operators","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"Binary variables can be used to construct logical operators. 
Here are some example.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Or","page":"Tips and tricks","title":"Or","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"x_3 = x_1 lor x_2","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x[1:3], Bin)\n@constraints(model, begin\n x[1] <= x[3]\n x[2] <= x[3]\n x[3] <= x[1] + x[2]\nend)","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#And","page":"Tips and tricks","title":"And","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"x_3 = x_1 land x_2","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x[1:3], Bin)\n@constraints(model, begin\n x[3] <= x[1]\n x[3] <= x[2]\n x[3] >= x[1] + x[2] - 1\nend)","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Not","page":"Tips and tricks","title":"Not","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"x_1 neg x_2","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x[1:2], Bin)\n@constraint(model, x[1] == 1 - x[2])","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Implies","page":"Tips and tricks","title":"Implies","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"x_1 implies x_2","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x[1:2], Bin)\n@constraint(model, x[1] <= x[2])","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Disjunctions","page":"Tips and tricks","title":"Disjunctions","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/#Problem","page":"Tips and tricks","title":"Problem","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"Suppose that we have two constraints a^top x leq b and c^top x leq d, and we want at least one to hold.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Trick-1","page":"Tips and tricks","title":"Trick 1","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"Use an indicator constraint.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"Example Either x_1 leq 1 or x_2 leq 2.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x[1:2])\n@variable(model, y[1:2], Bin)\n@constraint(model, y[1] --> {x[1] <= 1})\n@constraint(model, y[2] --> {x[2] <= 2})\n@constraint(model, sum(y) == 1) # Exactly one branch must be true","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Trick-2","page":"Tips and tricks","title":"Trick 
2","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"Introduce a \"big-M\" multiplied by a binary variable to relax one of the constraints.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"Example Either x_1 leq 1 or x_2 leq 2.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x[1:2] <= 10)\n@variable(model, y[1:2], Bin)\nM = 100\n@constraint(model, x[1] <= 1 + M * y[1])\n@constraint(model, x[2] <= 2 + M * y[2])\n@constraint(model, sum(y) == 1)","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"warning: Warning\nIf M is too small, the solution may be suboptimal. If M is too big, the solver may encounter numerical issues. Try to use domain knowledge to choose an M that is just right. Gurobi has a good documentation section on this topic.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#tips_indicator_constraint","page":"Tips and tricks","title":"Indicator constraints","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/#Problem-2","page":"Tips and tricks","title":"Problem","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"Suppose we want to model that a certain linear inequality must be satisfied when some other event occurs, that is, for a binary variable z, we want to model the implication:","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"z = 1 implies a^top x leq b","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Trick-1-2","page":"Tips and tricks","title":"Trick 1","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"Some solvers have native support for indicator constraints. 
In addition, if the variables involved have finite domains, then JuMP can automatically reformulate an indicator into a mixed-integer program.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"Example x_1 + x_2 leq 1 if z = 1.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, 0 <= x[1:2] <= 10)\n@variable(model, z, Bin)\n@constraint(model, z --> {sum(x) <= 1})","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"Example x_1 + x_2 leq 1 if z = 0.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, 0 <= x[1:2] <= 10)\n@variable(model, z, Bin)\n@constraint(model, !z --> {sum(x) <= 1})","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Trick-2-2","page":"Tips and tricks","title":"Trick 2","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"If the solver doesn't support indicator constraints and the variables do not have a finite domain, you can use the big-M trick.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"Example x_1 + x_2 leq 1 if z = 1.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x[1:2] <= 10)\n@variable(model, z, Bin)\nM = 100\n@constraint(model, sum(x) <= 1 + M * (1 - z))","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"Example x_1 + x_2 leq 1 if z = 0.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x[1:2] <= 10)\n@variable(model, z, Bin)\nM = 100\n@constraint(model, sum(x) <= 1 + M * z)","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Semi-continuous-variables","page":"Tips and tricks","title":"Semi-continuous variables","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"A semi-continuous variable is a continuous variable between bounds lu that also can assume the value zero, that is: x in 0 cup lu","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"Example x in 0cup 1 2","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x in Semicontinuous(1.0, 2.0))","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"You can also represent a semi-continuous variable using the reformulation:","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x)\n@variable(model, z, Bin)\n@constraint(model, x <= 2 * z)\n@constraint(model, x >= 1 * z)","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"When z = 0 the two 
constraints are equivalent to 0 <= x <= 0. When z = 1, the two constraints are equivalent to 1 <= x <= 2.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Semi-integer-variables","page":"Tips and tricks","title":"Semi-integer variables","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"A semi-integer variable is a variable which assumes integer values between bounds lu and can also assume the value zero: x in 0 cup l u cap mathbbZ","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x in Semiinteger(5.0, 10.0))","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"You can also represent a semi-integer variable using the reformulation:","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x, Int)\n@variable(model, z, Bin)\n@constraint(model, x <= 10 * z)\n@constraint(model, x >= 5 * z)","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"When z = 0 the two constraints are equivalent to 0 <= x <= 0. When z = 1, the two constraints are equivalent to 5 <= x <= 10.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Special-Ordered-Sets-of-Type-1","page":"Tips and tricks","title":"Special Ordered Sets of Type 1","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"A Special Ordered Set of Type 1 is a set of variables, at most one of which can take a non-zero value, all others being at 0.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"They most frequently apply where a set of variables are actually binary variables. 
In other words, we have to choose at most one from a set of possibilities.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x[1:3], Bin)\n@constraint(model, x in SOS1())","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"You can optionally pass SOS1 a weight vector like","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"@constraint(model, x in SOS1([0.2, 0.5, 0.3]))","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"If the decision variables are related and have a physical ordering, then the weight vector, although not used directly in the constraint, can help the solver make a better decision in the solution process.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#tip_sos2","page":"Tips and tricks","title":"Special Ordered Sets of Type 2","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"A Special Ordered Set of type 2 is a set of non-negative variables, of which at most two can be non-zero, and if two are non-zero these must be consecutive in their ordering.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"model = Model();\n@variable(model, x[1:3])\n@constraint(model, x in SOS2([3.0, 1.0, 2.0]))","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"The ordering provided by the weight vector is more important in this case as the variables need to be consecutive according to the ordering. For example, in the above constraint, the possible pairs are:","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"Consecutive\n(x[1] and x[3]) as they correspond to 3 and 2 resp. and thus can be non-zero\n(x[2] and x[3]) as they correspond to 1 and 2 resp. and thus can be non-zero\nNon-consecutive\n(x[1] and x[2]) as they correspond to 3 and 1 resp. 
and thus cannot be non-zero","category":"page"},{"location":"tutorials/linear/tips_and_tricks/#Piecewise-linear-approximations","page":"Tips and tricks","title":"Piecewise linear approximations","text":"","category":"section"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"SOSII constraints are most often used to form piecewise linear approximations of a function.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"Given a set of points for x:","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"x̂ = -1:0.5:2","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"and a set of corresponding points for y:","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"ŷ = x̂ .^ 2","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"the piecewise linear approximation is constructed by representing x and y as convex combinations of x̂ and ŷ.","category":"page"},{"location":"tutorials/linear/tips_and_tricks/","page":"Tips and tricks","title":"Tips and tricks","text":"N = length(x̂)\nmodel = Model();\n@variable(model, -1 <= x <= 2)\n@variable(model, y)\n@variable(model, 0 <= λ[1:N] <= 1)\n@objective(model, Max, y)\n@constraints(model, begin\n x == sum(x̂[i] * λ[i] for i in 1:N)\n y == sum(ŷ[i] * λ[i] for i in 1:N)\n sum(λ) == 1\n λ in SOS2()\nend)","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"EditURL = \"web_app.jl\"","category":"page"},{"location":"tutorials/applications/web_app/#Serving-web-apps","page":"Serving web apps","title":"Serving web apps","text":"","category":"section"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"This tutorial demonstrates how to setup and serve JuMP models via a REST API.","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"In the example app we are building, we solve a trivial mixed-integer program, which is parameterized by the lower bound of a variable. To call the service, users send an HTTP POST request with JSON contents indicating the lower bound. 
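For example (an illustrative sketch added here; the lower_bound key is the one expected by the endpoint defined below), the body of such a request can be built and parsed with JSON.jl:

```julia
import JSON

# What a client might send: a JSON object carrying the lower bound.
body = JSON.json(Dict("lower_bound" => 1.2))   # "{\"lower_bound\":1.2}"

# What the server does with it: parse the body back into a Dict{String,Any}.
JSON.parse(body)
```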
The returned value is the solution of the mixed-integer program as JSON.","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"First, we need JuMP and a solver:","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"using JuMP\nimport HiGHS","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"We also need HTTP.jl to act as our REST server, and JSON.jl to marshal data.","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"import HTTP\nimport JSON","category":"page"},{"location":"tutorials/applications/web_app/#The-server-side","page":"Serving web apps","title":"The server side","text":"","category":"section"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"The core components of our REST server are endpoints. These are functions which accept a Dict{String,Any} of input parameters, and return a Dict{String,Any} as output. The types are Dict{String,Any} because we're going to read these to and from JSON.","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"Here's a very simple endpoint: it accepts params as input, formulates and solves a trivial mixed-integer program, and then returns a dictionary with the result.","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"function endpoint_solve(params::Dict{String,Any})\n if !haskey(params, \"lower_bound\")\n return Dict{String,Any}(\n \"status\" => \"failure\",\n \"reason\" => \"missing lower_bound param\",\n )\n elseif !(params[\"lower_bound\"] isa Real)\n return Dict{String,Any}(\n \"status\" => \"failure\",\n \"reason\" => \"lower_bound is not a number\",\n )\n end\n model = Model(HiGHS.Optimizer)\n set_silent(model)\n @variable(model, x >= params[\"lower_bound\"], Int)\n optimize!(model)\n ret = Dict{String,Any}(\n \"status\" => \"okay\",\n \"terminaton_status\" => termination_status(model),\n \"primal_status\" => primal_status(model),\n )\n # Only include the `x` key if it has a value.\n if primal_status(model) == FEASIBLE_POINT\n ret[\"x\"] = value(x)\n end\n return ret\nend","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"When we call this, we get:","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"endpoint_solve(Dict{String,Any}(\"lower_bound\" => 1.2))","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"endpoint_solve(Dict{String,Any}())","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"For a second function, we need a function that accepts an HTTP.Request object and returns an HTTP.Response object.","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"function serve_solve(request::HTTP.Request)\n data = JSON.parse(String(request.body))\n solution = endpoint_solve(data)\n return HTTP.Response(200, 
JSON.json(solution))\nend","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"Finally, we need an HTTP server. There are a variety of ways you can do this in HTTP.jl. We use an explicit Sockets.listen so we have manual control of when we shutdown the server.","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"function setup_server(host, port)\n server = HTTP.Sockets.listen(host, port)\n HTTP.serve!(host, port; server = server) do request\n try\n # Extend the server by adding other endpoints here.\n if request.target == \"/api/solve\"\n return serve_solve(request)\n else\n return HTTP.Response(404, \"target $(request.target) not found\")\n end\n catch err\n # Log details about the exception server-side\n @info \"Unhandled exception: $err\"\n # Return a response to the client\n return HTTP.Response(500, \"internal error\")\n end\n end\n return server\nend","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"warning: Warning\nHTTP.jl does not serve requests on a separate thread. Therefore, a long-running job will block the main thread, preventing concurrent users from submitting requests. To work-around this, read HTTP.jl issue 798 or watch Building Microservices and Applications in Julia from JuliaCon 2020.","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"server = setup_server(HTTP.ip\"127.0.0.1\", 8080)","category":"page"},{"location":"tutorials/applications/web_app/#The-client-side","page":"Serving web apps","title":"The client side","text":"","category":"section"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"Now that we have a server, we can send it requests via this function:","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"function send_request(data::Dict; endpoint::String = \"solve\")\n ret = HTTP.request(\n \"POST\",\n # This should match the URL and endpoint we defined for our server.\n \"http://127.0.0.1:8080/api/$endpoint\",\n [\"Content-Type\" => \"application/json\"],\n JSON.json(data),\n )\n if ret.status != 200\n # This could happen if there are time-outs, network errors, etc.\n return Dict(\n \"status\" => \"failure\",\n \"code\" => ret.status,\n \"body\" => String(ret.body),\n )\n end\n return JSON.parse(String(ret.body))\nend","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"Let's see what happens:","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"send_request(Dict(\"lower_bound\" => 0))","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"send_request(Dict(\"lower_bound\" => 1.2))","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"If we don't send a lower_bound, we get:","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"send_request(Dict(\"invalid_param\" => 1.2))","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web 
apps","title":"Serving web apps","text":"If we don't send a lower_bound that is a number, we get:","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"send_request(Dict(\"lower_bound\" => \"1.2\"))","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"Finally, we can shutdown our HTTP server:","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"close(server)","category":"page"},{"location":"tutorials/applications/web_app/#Next-steps","page":"Serving web apps","title":"Next steps","text":"","category":"section"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"For more complicated examples relating to HTTP servers, consult the HTTP.jl documentation.","category":"page"},{"location":"tutorials/applications/web_app/","page":"Serving web apps","title":"Serving web apps","text":"To see how you can integrate this with a larger JuMP model, read Design patterns for larger models.","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"EditURL = \"https://github.com/jump-dev/SDPT3.jl/blob/b565aac2a58818090d521f2340e71f597688e4fb/README.md\"","category":"page"},{"location":"packages/SDPT3/#SDPT3.jl","page":"jump-dev/SDPT3.jl","title":"SDPT3.jl","text":"","category":"section"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"SDPT3.jl is wrapper for the SDPT3 solver.","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"The wrapper has two components:","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"an exported sdpt3 function that is a thin wrapper on top of the sdpt3 MATLAB function\nan interface to MathOptInterface","category":"page"},{"location":"packages/SDPT3/#Affiliation","page":"jump-dev/SDPT3.jl","title":"Affiliation","text":"","category":"section"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"This wrapper is maintained by the JuMP community and is not an official wrapper of SDPT3.","category":"page"},{"location":"packages/SDPT3/#License","page":"jump-dev/SDPT3.jl","title":"License","text":"","category":"section"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"SDPT3.jl is licensed under the MIT License.","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"The underlying solver, SDPT3 is licensed under the GPL v2 License.","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"In addition, SDPT3 requires an installation of MATLAB, which is a closed-source commercial product for which you must obtain a license.","category":"page"},{"location":"packages/SDPT3/#Use-with-JuMP","page":"jump-dev/SDPT3.jl","title":"Use with JuMP","text":"","category":"section"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"To use SDPT3 with JuMP, do:","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"using JuMP, SDPT3\nmodel = Model(SDPT3.Optimizer)\nset_attribute(model, \"printlevel\", 
0)","category":"page"},{"location":"packages/SDPT3/#Installation","page":"jump-dev/SDPT3.jl","title":"Installation","text":"","category":"section"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"First, make sure that you satisfy the requirements of the MATLAB.jl Julia package, and that the SeDuMi software is installed in your MATLAB™ installation.","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"Then, install SDPT3.jl using Pkg.add:","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"import Pkg\nPkg.add(\"SDPT3\")","category":"page"},{"location":"packages/SDPT3/#SDPT3-not-in-PATH","page":"jump-dev/SDPT3.jl","title":"SDPT3 not in PATH","text":"","category":"section"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"If you get the error:","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"Error using save\nVariable 'jx_sdpt3_arg_out_1' not found.\n\nERROR: LoadError: MATLAB.MEngineError(\"failed to get variable jx_sdpt3_arg_out_1 from MATLAB session\")\nStacktrace:\n[...]","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"The error means that we could not find the sdpt3 function with one output argument using the MATLAB C API. This most likely means that you did not add SDPT3 to the MATLAB's path (that is, the toolbox/local/pathdef.m file).","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"If modifying toolbox/local/pathdef.m does not work, the following should work, where /path/to/sdpt3/ is the directory where the sdpt3 folder is located:","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"julia> using MATLAB\n\njulia> cd(\"/path/to/sdpt3/\") do\n MATLAB.mat\"install_sdpt3\"\n end\n\njulia> MATLAB.mat\"savepath\"","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"An alternative fix is suggested in the following issue.","category":"page"},{"location":"packages/SDPT3/#Error-in-validate","page":"jump-dev/SDPT3.jl","title":"Error in validate","text":"","category":"section"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"If you get the error:","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"Brace indexing is not supported for variables of this type.\n\nError in validate\n\nError in sdpt3 (line 171)\n [blk,At,C,b,blkdim,numblk,parbarrier] = validate(blk,At,C,b,par,parbarrier);\n\nError using save\nVariable 'jx_sdpt3_arg_out_1' not found.","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"It might mean that you have added SDPNAL in addition to SDPT3 in the MATLAB's path (that is, the toolbox/local/pathdef.m file). 
Because SDPNAL also defines a validate function, this can make sdpt3 call SDPNAL's validate function instead of SDPT3's validate function, which causes the issue.","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"One way to fix this from the Julia REPL is to reset the search path to the factory-installed state using restoredefaultpath:","category":"page"},{"location":"packages/SDPT3/","page":"jump-dev/SDPT3.jl","title":"jump-dev/SDPT3.jl","text":"julia> using MATLAB\n\njulia> MATLAB.restoredefaultpath()\n\njulia> MATLAB.mat\"savepath\"","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"EditURL = \"dualization.jl\"","category":"page"},{"location":"tutorials/conic/dualization/#Dualization","page":"Dualization","title":"Dualization","text":"","category":"section"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"The purpose of this tutorial is to explain how to use Dualization.jl to improve the performance of some conic optimization models.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"There are two important takeaways:","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"JuMP reformulates problems to meet the input requirements of the solver, potentially increasing the problem size by adding slack variables and constraints.\nSolving the dual of a conic model can be more efficient than solving the primal.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"Dualization.jl is a package which fixes these problems, allowing you to solve the dual instead of the primal with a one-line change to your code.","category":"page"},{"location":"tutorials/conic/dualization/#Required-packages","page":"Dualization","title":"Required packages","text":"","category":"section"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"This tutorial uses the following packages:","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"using JuMP\nimport Dualization\nimport SCS","category":"page"},{"location":"tutorials/conic/dualization/#Background","page":"Dualization","title":"Background","text":"","category":"section"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"Conic optimization solvers typically accept one of two input formulations.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"The first is the standard conic form:","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"beginalign\n min_x in mathbbR^n c^top x \n textst A x = b \n x in mathcalK\nendalign","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"in which we have a set of linear equality constraints Ax = b and the variables belong to a cone mathcalK.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"The second is 
the geometric conic form:","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"beginalign\n min_x in mathbbR^n c^top x \n textst A x - b in mathcalK\nendalign","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"in which an affine function Ax - b belongs to a cone mathcalK and the variables are free.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"It is trivial to convert between these two representations, for example, to go from the geometric conic form to the standard conic form we introduce slack variables y:","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"beginalign\n min_x in mathbbR^n c^top x \n textst beginbmatrixA -Iendbmatrix beginbmatrixxyendbmatrix = b \n beginbmatrixxyendbmatrix in mathbbR^n times mathcalK\nendalign","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"and to go from the standard conic form to the geometric conic form, we can rewrite the equality constraint as a function belonging to the {0} cone:","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"beginalign\n min_x in mathbbR^n c^top x \n textst beginbmatrixAIendbmatrix x - beginbmatrixb0endbmatrix in 0 times mathcalK\nendalign","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"From a theoretical perspective, the two formulations are equivalent, and if you implement a model in the standard conic form and pass it to a geometric conic form solver (or vice versa), then JuMP will automatically reformulate the problem into the correct formulation.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"From a practical perspective though, the reformulations are problematic because the additional slack variables and constraints can make the problem much larger and therefore harder to solve.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"You should also note many problems contain a mix of conic constraints and variables, and so they do not neatly fall into one of the two formulations. In these cases, JuMP reformulates only the variables and constraints as necessary to convert the problem into the desired form.","category":"page"},{"location":"tutorials/conic/dualization/#Primal-and-dual-formulations","page":"Dualization","title":"Primal and dual formulations","text":"","category":"section"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"Duality plays a large role in conic optimization. For a detailed description of conic duality, see Duality.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"A useful observation is that if the primal problem is in standard conic form, then the dual problem is in geometric conic form, and vice versa. Moreover, the primal and dual may have a different number of variables and constraints, although which one is smaller depends on the problem. 
Therefore, instead of reformulating the problem from one form to the other, it can be more efficient to solve the dual instead of the primal.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"To demonstrate, we use a variation of the Maximum cut via SDP example.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"The primal formulation (in standard conic form) is:","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"model_primal = Model()\n@variable(model_primal, X[1:2, 1:2], PSD)\n@objective(model_primal, Max, sum([1 -1; -1 1] .* X))\n@constraint(model_primal, primal_c[i = 1:2], 1 - X[i, i] == 0)\nprint(model_primal)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"This problem has three scalar decision variables (the matrix X is symmetric), two scalar equality constraints, and a constraint that X is positive semidefinite.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"The dual of model_primal is:","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"model_dual = Model()\n@variable(model_dual, y[1:2])\n@objective(model_dual, Min, sum(y))\n@constraint(model_dual, dual_c, [y[1]-1 1; 1 y[2]-1] in PSDCone())\nprint(model_dual)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"This problem has two scalar decision variables, and a 2x2 positive semidefinite matrix constraint.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"tip: Tip\nIf you haven't seen conic duality before, try deriving the dual problem based on the description in Duality. 
You'll need to know that the dual cone of PSDCone is the PSDCone.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"When we solve model_primal with SCS.Optimizer, SCS reports three variables (variables n: 3), five rows in the constraint matrix (constraints m: 5), and five non-zeros in the matrix (nnz(A): 5):","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"set_optimizer(model_primal, SCS.Optimizer)\noptimize!(model_primal)\n@assert is_solved_and_feasible(model_primal; dual = true)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"(There are five rows in the constraint matrix because SCS expects problems in geometric conic form, and so JuMP has reformulated the X, PSD variable constraint into the affine constraint X .+ 0 in PSDCone().)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"The solution we obtain is:","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"value.(X)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"dual.(primal_c)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"objective_value(model_primal)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"When we solve model_dual with SCS.Optimizer, SCS reports two variables (variables n: 2), three rows in the constraint matrix (constraints m: 3), and two non-zeros in the matrix (nnz(A): 2):","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"set_optimizer(model_dual, SCS.Optimizer)\noptimize!(model_dual)\n@assert is_solved_and_feasible(model_dual; dual = true)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"and the solution we obtain is:","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"dual.(dual_c)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"value.(y)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"objective_value(model_dual)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"This particular problem is small enough that it isn't meaningful to compare the solve times, but in general, we should expect model_dual to solve faster than model_primal because it contains fewer variables and constraints. The difference is particularly noticeable on large-scale optimization problems.","category":"page"},{"location":"tutorials/conic/dualization/#dual_optimizer","page":"Dualization","title":"dual_optimizer","text":"","category":"section"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"Manually deriving the conic dual is difficult and error-prone. 
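One quick sanity check when deriving a dual by hand (an aside added here, assuming model_primal and model_dual from above have both been solved) is that the two optimal objective values agree by strong duality:

```julia
# SCS is a first-order solver, so allow a loose tolerance when comparing.
@assert isapprox(
    objective_value(model_primal),
    objective_value(model_dual);
    atol = 1e-3,
)
```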
The package Dualization.jl provides the Dualization.dual_optimizer meta-solver, which wraps any MathOptInterface-compatible solver in an interface that automatically formulates and solves the dual of an input problem.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"To demonstrate, we use Dualization.dual_optimizer to solve model_primal:","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"set_optimizer(model_primal, Dualization.dual_optimizer(SCS.Optimizer))\noptimize!(model_primal)\n@assert is_solved_and_feasible(model_primal; dual = true)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"The performance is the same as if we solved model_dual, and the correct solution is returned to X:","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"value.(X)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"dual.(primal_c)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"Moreover, if we use dual_optimizer on model_dual, then we get the same performance as if we had solved model_primal:","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"set_optimizer(model_dual, Dualization.dual_optimizer(SCS.Optimizer))\noptimize!(model_dual)\n@assert is_solved_and_feasible(model_dual; dual = true)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"dual.(dual_c)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"value.(y)","category":"page"},{"location":"tutorials/conic/dualization/#A-mixed-example","page":"Dualization","title":"A mixed example","text":"","category":"section"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"The Maximum cut via SDP example is nicely defined because the primal is in standard conic form and the dual is in geometric conic form. However, many practical models contain a mix of the two formulations. One example is The minimum distortion problem:","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"D = [0 1 1 1; 1 0 2 2; 1 2 0 2; 1 2 2 0]\nmodel = Model()\n@variable(model, c²)\n@variable(model, Q[1:4, 1:4], PSD)\n@objective(model, Min, c²)\nfor i in 1:4, j in (i+1):4\n @constraint(model, D[i, j]^2 <= Q[i, i] + Q[j, j] - 2 * Q[i, j])\n @constraint(model, Q[i, i] + Q[j, j] - 2 * Q[i, j] <= c² * D[i, j]^2)\nend\n@constraint(model, Q[1, 1] == 0)\n@constraint(model, c² >= 1)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"In this formulation, the Q variable is of the form xinmathcalK, but there is also a free variable, c², a linear equality constraint, Q[1, 1] == 0, and some linear inequality constraints. 
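(As an aside, a small sketch not in the original tutorial: you can query the JuMP-level size of this model, although these counts are not the same as the size of the bridged problem that the solver ultimately sees.)

```julia
num_variables(model)   # JuMP-level variable count
num_constraints(model; count_variable_in_set_constraints = true)   # JuMP-level constraint count
```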
Rather than attempting to derive the formulation that JuMP would pass to SCS and its dual, the simplest solution is to try solving the problem with and without dual_optimizer to see which formulation is most efficient.","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"set_optimizer(model, SCS.Optimizer)\noptimize!(model)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"set_optimizer(model, Dualization.dual_optimizer(SCS.Optimizer))\noptimize!(model)","category":"page"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"For this problem, SCS reports that the primal has variables n: 11, constraints m: 24 and that the dual has variables n: 14, constraints m: 24. Therefore, we should probably use the primal formulation because it has fewer variables and the same number of constraints.","category":"page"},{"location":"tutorials/conic/dualization/#When-to-use-dual_optimizer","page":"Dualization","title":"When to use dual_optimizer","text":"","category":"section"},{"location":"tutorials/conic/dualization/","page":"Dualization","title":"Dualization","text":"Because it can make the problem larger or smaller, depending on the problem and the choice of solver, there is no definitive rule on when you should use dual_optimizer. However, you should try dual_optimizer if your conic optimization problem takes a long time to solve, or if you need to repeatedly solve similarly structured problems with different data. In some cases solving the dual instead of the primal can make a large difference.","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"EditURL = \"ellipse_approx.jl\"","category":"page"},{"location":"tutorials/conic/ellipse_approx/#Example:-ellipsoid-approximation","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"","category":"section"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"This tutorial considers the problem of computing extremal ellipsoids: finding ellipsoids that best approximate a given set. 
As an extension, we show how to use JuMP to inspect the bridges that were used, and how to explore alternative formulations.","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"The model comes from Section 4.9 of (Ben-Tal and Nemirovski, 2001).","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"For a related example, see also the Example: minimal ellipses tutorial.","category":"page"},{"location":"tutorials/conic/ellipse_approx/#Required-packages","page":"Example: ellipsoid approximation","title":"Required packages","text":"","category":"section"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"This tutorial uses the following packages:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"using JuMP\nimport LinearAlgebra\nimport Plots\nimport Random\nimport SCS\nimport Test","category":"page"},{"location":"tutorials/conic/ellipse_approx/#Problem-formulation","page":"Example: ellipsoid approximation","title":"Problem formulation","text":"","category":"section"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"Suppose that we are given a set mathcalS consisting of m points in n-dimensional space:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"mathcalS = x_1 ldots x_m subset mathbbR^n","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"Our goal is to determine an optimal vector c in mathbbR^n and an optimal n times n real symmetric matrix D such that the ellipse:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"E(D c) = x (x - c)^top D ( x - c) leq 1 ","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"contains mathcalS and has the smallest possible volume.","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"The optimal D and c are given by the optimization problem:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"beginaligned\nmax quad t \ntextst quad Z succeq 0 \n beginbmatrix s z^top z Z endbmatrix succeq 0 \n x_i^top Z x_i - 2x_i^top z + s leq 1 quad i=1 ldots m \n t le sqrtndet(Z)\nendaligned","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"where D = Z_* and c = Z_*^-1 z_*.","category":"page"},{"location":"tutorials/conic/ellipse_approx/#Data","page":"Example: ellipsoid approximation","title":"Data","text":"","category":"section"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid 
approximation","text":"We first need to generate some points to work with.","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"function generate_point_cloud(\n m; # number of 2-dimensional points\n a = 10, # scaling in x direction\n b = 2, # scaling in y direction\n rho = π / 6, # rotation of points around origin\n random_seed = 1,\n)\n rng = Random.MersenneTwister(random_seed)\n P = randn(rng, Float64, m, 2)\n Phi = [a*cos(rho) a*sin(rho); -b*sin(rho) b*cos(rho)]\n S = P * Phi\n return S\nend","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"For the sake of this example, let's take m = 600:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"S = generate_point_cloud(600);\nnothing #hide","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"We will visualise the points (and ellipse) using the Plots package:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"r = 1.1 * maximum(abs.(S))\nplot = Plots.scatter(\n S[:, 1],\n S[:, 2];\n xlim = (-r, r),\n ylim = (-r, r),\n label = nothing,\n c = :green,\n shape = :x,\n size = (600, 600),\n)","category":"page"},{"location":"tutorials/conic/ellipse_approx/#JuMP-formulation","page":"Example: ellipsoid approximation","title":"JuMP formulation","text":"","category":"section"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"Now let's build and the JuMP model. 
We'll compute D and c after the solve.","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"model = Model(SCS.Optimizer)\n# We need to use a tighter tolerance for this example, otherwise the bounding\n# ellipse won't actually be bounding...\nset_attribute(model, \"eps_rel\", 1e-7)\nset_silent(model)\nm, n = size(S)\n@variable(model, z[1:n])\n@variable(model, Z[1:n, 1:n], PSD)\n@variable(model, s)\n@variable(model, t)\n@constraint(model, [s z'; z Z] >= 0, PSDCone())\n@constraint(\n model,\n [i in 1:m],\n S[i, :]' * Z * S[i, :] - 2 * S[i, :]' * z + s <= 1,\n)\n@constraint(model, [t; vec(Z)] in MOI.RootDetConeSquare(n))\n@objective(model, Max, t)\noptimize!(model)\nTest.@test is_solved_and_feasible(model)\nsolution_summary(model)","category":"page"},{"location":"tutorials/conic/ellipse_approx/#Results","page":"Example: ellipsoid approximation","title":"Results","text":"","category":"section"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"After solving the model to optimality we can recover the solution in terms of D and c:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"D = value.(Z)","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"c = D \\ value.(z)","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"We can check that each point lies inside the ellipsoid, by checking if the largest normalized radius is less than 1:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"largest_radius = maximum(map(x -> (x - c)' * D * (x - c), eachrow(S)))","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"Finally, overlaying the solution in the plot we see the minimal volume approximating ellipsoid:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"P = sqrt(D)\nq = -P * c\ndata = [tuple(P \\ [cos(θ) - q[1], sin(θ) - q[2]]...) for θ in 0:0.05:(2pi+0.05)]\nPlots.plot!(plot, data; c = :crimson, label = nothing)","category":"page"},{"location":"tutorials/conic/ellipse_approx/#Alternative-formulations","page":"Example: ellipsoid approximation","title":"Alternative formulations","text":"","category":"section"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"The formulation of model uses MOI.RootDetConeSquare. However, because SCS does not natively support this cone, JuMP automatically reformulates the problem into an equivalent problem that SCS does support. 
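As a side check (an illustrative sketch, not part of the original tutorial, reusing the SCS and MOI names already loaded above), you can ask the solver directly whether it supports the cone:\n\nMOI.supports_constraint(\n    SCS.Optimizer(),\n    MOI.VectorAffineFunction{Float64},\n    MOI.RootDetConeSquare,\n)\n\nThis should return false, which is why a bridge is needed. 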
You can see the reformulation that JuMP chose using print_active_bridges:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"print_active_bridges(model)","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"There's a lot going on here, but the first bullet is:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"* Unsupported objective: MOI.VariableIndex\n| bridged by:\n| MOIB.Objective.FunctionizeBridge{Float64}\n| introduces:\n| * Supported objective: MOI.ScalarAffineFunction{Float64}","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"This says that SCS does not support a MOI.VariableIndex objective function, and that JuMP used a MOI.Bridges.Objective.FunctionizeBridge to convert it into a MOI.ScalarAffineFunction{Float64} objective function.","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"We can leave JuMP to do the reformulation, or we can rewrite our model to have an objective function that SCS natively supports:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"@objective(model, Max, 1.0 * t + 0.0);\nnothing #hide","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"Re-printing the active bridges:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"print_active_bridges(model)","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"we get * Supported objective: MOI.ScalarAffineFunction{Float64}.","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"We can manually implement some other reformulations to change our model to something that SCS more closely supports by:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"Replacing the MOI.VectorOfVariables in MOI.PositiveSemidefiniteConeTriangle constraint @variable(model, Z[1:n, 1:n], PSD) with the MOI.VectorAffineFunction in MOI.PositiveSemidefiniteConeTriangle @constraint(model, Z >= 0, PSDCone()).\nReplacing the MOI.VectorOfVariables in MOI.PositiveSemidefiniteConeSquare constraint [s z'; z Z] >= 0, PSDCone() with the MOI.VectorAffineFunction in MOI.PositiveSemidefiniteConeTriangle @constraint(model, LinearAlgebra.Symmetric([s z'; z Z]) >= 0, PSDCone()).\nReplacing the MOI.ScalarAffineFunction in MOI.GreaterThan constraints with the vectorized equivalent of MOI.VectorAffineFunction in MOI.Nonnegatives\nReplacing the MOI.VectorOfVariables in MOI.RootDetConeSquare constraint with MOI.VectorAffineFunction in 
MOI.RootDetConeTriangle.","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"Note that we still need to bridge MOI.PositiveSemidefiniteConeTriangle constraints because SCS uses an internal SCS.ScaledPSDCone set instead.","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"model = Model(SCS.Optimizer)\nset_attribute(model, \"eps_rel\", 1e-6)\nset_silent(model)\n@variable(model, z[1:n])\n@variable(model, s)\n@variable(model, t)\n# The former @variable(model, Z[1:n, 1:n], PSD)\n@variable(model, Z[1:n, 1:n], Symmetric)\n@constraint(model, Z >= 0, PSDCone())\n# The former [s z'; z Z] >= 0, PSDCone()\n@constraint(model, LinearAlgebra.Symmetric([s z'; z Z]) >= 0, PSDCone())\n# The former constraint S[i, :]' * Z * S[i, :] - 2 * S[i, :]' * z + s <= 1\nf = [1 - S[i, :]' * Z * S[i, :] + 2 * S[i, :]' * z - s for i in 1:m]\n@constraint(model, f in MOI.Nonnegatives(m))\n# The former constraint [t; vec(Z)] in MOI.RootDetConeSquare(n)\n@constraint(model, 1 * [t; triangle_vec(Z)] .+ 0 in MOI.RootDetConeTriangle(n))\n# The former @objective(model, Max, t)\n@objective(model, Max, 1 * t + 0)\noptimize!(model)\nTest.@test is_solved_and_feasible(model)\nsolve_time_1 = solve_time(model)","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"This formulation gives the much smaller graph:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"print_active_bridges(model)","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"The last bullet shows how JuMP reformulated the MOI.RootDetConeTriangle constraint by adding a mix of MOI.PositiveSemidefiniteConeTriangle and MOI.GeometricMeanCone constraints.","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"Because SCS doesn't natively support the MOI.GeometricMeanCone, these constraints were further bridged using a MOI.Bridges.Constraint.GeoMeanToPowerBridge to a series of MOI.PowerCone constraints.","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"However, there are many other ways that a MOI.GeometricMeanCone can be reformulated into something that SCS supports. 
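One way to survey the candidates (an illustrative aside, not part of the original tutorial; it assumes a recent JuMP version) is:\n\nprint_bridge_graph(model)\n\nwhich prints every variable, constraint, and objective reformulation that JuMP could apply, not only the ones it chose. 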
Let's see what happens if we use remove_bridge to remove the MOI.Bridges.Constraint.GeoMeanToPowerBridge:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"remove_bridge(model, MOI.Bridges.Constraint.GeoMeanToPowerBridge)\noptimize!(model)\nTest.@test is_solved_and_feasible(model)","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"This time, the solve took:","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"solve_time_2 = solve_time(model)","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"where previously it took","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"solve_time_1","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"Why was the solve time different?","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"print_active_bridges(model)","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"This time, JuMP used a MOI.Bridges.Constraint.GeoMeanBridge to reformulate the constraint into a set of MOI.RotatedSecondOrderCone constraints, which were further reformulated into a set of supported MOI.SecondOrderCone constraints.","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"Since the two models are equivalent, we can conclude that for this particular model, the MOI.SecondOrderCone formulation is more efficient.","category":"page"},{"location":"tutorials/conic/ellipse_approx/","page":"Example: ellipsoid approximation","title":"Example: ellipsoid approximation","text":"In general though, the performance of a particular reformulation is problem- and solver-specific. Therefore, JuMP chooses to minimize the number of bridges in the default reformulation, leaving you to explore alternative formulations using the tools and techniques shown in this tutorial.","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"DocTestSetup = quote\n using JuMP\nend","category":"page"},{"location":"manual/containers/#Containers","page":"Containers","title":"Containers","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"JuMP provides specialized containers similar to AxisArrays that enable multi-dimensional arrays with non-integer indices.","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"These containers are created automatically by JuMP's macros. 
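For example (a minimal sketch, not part of the original manual; no solver is needed):\n\nusing JuMP\nmodel = Model()\n@variable(model, x[1:2, [:A, :B]])\n\nHere x is stored as a Containers.DenseAxisArray because the second index set is not of the form 1:n. 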
Each macro has the same basic syntax:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"@macroname(model, name[key1=index1, index2; optional_condition], other stuff)","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"The containers are generated by the name[key1=index1, index2; optional_condition] syntax. Everything else is specific to the particular macro.","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Containers can be named, for example, name[key=index], or unnamed, for example, [key=index]. We call unnamed containers anonymous.","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"We call the bits inside the square brackets and before the ; the index sets. The index sets can be named, for example, [i = 1:4], or they can be unnamed, for example, [1:4].","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"We call the bit inside the square brackets and after the ; the condition. Conditions are optional.","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"In addition to the standard JuMP macros like @variable and @constraint, which construct containers of variables and constraints respectively, you can use Containers.@container to construct containers with arbitrary elements.","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"We will use this macro to explain the three types of containers that are natively supported by JuMP: Array, Containers.DenseAxisArray, and Containers.SparseAxisArray.","category":"page"},{"location":"manual/containers/#Array","page":"Containers","title":"Array","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"An Array is created when the index sets are rectangular and the index sets are of the form 1:n.","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> Containers.@container(x[i = 1:2, j = 1:3], (i, j))\n2×3 Matrix{Tuple{Int64, Int64}}:\n (1, 1) (1, 2) (1, 3)\n (2, 1) (2, 2) (2, 3)","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"The result is a normal Julia Array, so you can do all the usual things.","category":"page"},{"location":"manual/containers/#Slicing","page":"Containers","title":"Slicing","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Arrays can be sliced","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> x[:, 1]\n2-element Vector{Tuple{Int64, Int64}}:\n (1, 1)\n (2, 1)\n\njulia> x[2, :]\n3-element Vector{Tuple{Int64, Int64}}:\n (2, 1)\n (2, 2)\n (2, 3)","category":"page"},{"location":"manual/containers/#Looping","page":"Containers","title":"Looping","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Use eachindex to loop over the elements:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> for key in eachindex(x)\n println(x[key])\n end\n(1, 1)\n(2, 1)\n(1, 2)\n(2, 2)\n(1, 3)\n(2, 3)","category":"page"},{"location":"manual/containers/#Get-the-index-sets","page":"Containers","title":"Get the 
index sets","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Use axes to obtain the index sets:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> axes(x)\n(Base.OneTo(2), Base.OneTo(3))","category":"page"},{"location":"manual/containers/#Broadcasting","page":"Containers","title":"Broadcasting","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Broadcasting over an Array returns an Array","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> swap(x::Tuple) = (last(x), first(x))\nswap (generic function with 1 method)\n\njulia> swap.(x)\n2×3 Matrix{Tuple{Int64, Int64}}:\n (1, 1) (2, 1) (3, 1)\n (1, 2) (2, 2) (3, 2)","category":"page"},{"location":"manual/containers/#Tables","page":"Containers","title":"Tables","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Use Containers.rowtable to convert the Array into a Tables.jl compatible Vector{<:NamedTuple}:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> table = Containers.rowtable(x; header = [:I, :J, :value])\n6-element Vector{@NamedTuple{I::Int64, J::Int64, value::Tuple{Int64, Int64}}}:\n (I = 1, J = 1, value = (1, 1))\n (I = 2, J = 1, value = (2, 1))\n (I = 1, J = 2, value = (1, 2))\n (I = 2, J = 2, value = (2, 2))\n (I = 1, J = 3, value = (1, 3))\n (I = 2, J = 3, value = (2, 3))","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Because it supports the Tables.jl interface, you can pass it to any function which accepts a table as input:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> import DataFrames;\n\njulia> DataFrames.DataFrame(table)\n6×3 DataFrame\n Row │ I J value\n │ Int64 Int64 Tuple…\n─────┼──────────────────────\n 1 │ 1 1 (1, 1)\n 2 │ 2 1 (2, 1)\n 3 │ 1 2 (1, 2)\n 4 │ 2 2 (2, 2)\n 5 │ 1 3 (1, 3)\n 6 │ 2 3 (2, 3)","category":"page"},{"location":"manual/containers/#DenseAxisArray","page":"Containers","title":"DenseAxisArray","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"A Containers.DenseAxisArray is created when the index sets are rectangular, but not of the form 1:n. 
The index sets can be of any type.","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> x = Containers.@container([i = 1:2, j = [:A, :B]], (i, j))\n2-dimensional DenseAxisArray{Tuple{Int64, Symbol},2,...} with index sets:\n Dimension 1, Base.OneTo(2)\n Dimension 2, [:A, :B]\nAnd data, a 2×2 Matrix{Tuple{Int64, Symbol}}:\n (1, :A) (1, :B)\n (2, :A) (2, :B)","category":"page"},{"location":"manual/containers/#Slicing-2","page":"Containers","title":"Slicing","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"DenseAxisArrays can be sliced","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> x[:, :A]\n1-dimensional DenseAxisArray{Tuple{Int64, Symbol},1,...} with index sets:\n Dimension 1, Base.OneTo(2)\nAnd data, a 2-element Vector{Tuple{Int64, Symbol}}:\n (1, :A)\n (2, :A)\n\njulia> x[1, :]\n1-dimensional DenseAxisArray{Tuple{Int64, Symbol},1,...} with index sets:\n Dimension 1, [:A, :B]\nAnd data, a 2-element Vector{Tuple{Int64, Symbol}}:\n (1, :A)\n (1, :B)","category":"page"},{"location":"manual/containers/#Looping-2","page":"Containers","title":"Looping","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Use eachindex to loop over the elements:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> for key in eachindex(x)\n println(x[key])\n end\n(1, :A)\n(2, :A)\n(1, :B)\n(2, :B)","category":"page"},{"location":"manual/containers/#Get-the-index-sets-2","page":"Containers","title":"Get the index sets","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Use axes to obtain the index sets:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> axes(x)\n(Base.OneTo(2), [:A, :B])","category":"page"},{"location":"manual/containers/#Broadcasting-2","page":"Containers","title":"Broadcasting","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Broadcasting over a DenseAxisArray returns a DenseAxisArray","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> swap(x::Tuple) = (last(x), first(x))\nswap (generic function with 1 method)\n\njulia> swap.(x)\n2-dimensional DenseAxisArray{Tuple{Symbol, Int64},2,...} with index sets:\n Dimension 1, Base.OneTo(2)\n Dimension 2, [:A, :B]\nAnd data, a 2×2 Matrix{Tuple{Symbol, Int64}}:\n (:A, 1) (:B, 1)\n (:A, 2) (:B, 2)","category":"page"},{"location":"manual/containers/#Access-internal-data","page":"Containers","title":"Access internal data","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Use Array(x) to copy the internal data array into a new Array:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> Array(x)\n2×2 Matrix{Tuple{Int64, Symbol}}:\n (1, :A) (1, :B)\n (2, :A) (2, :B)","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"To access the internal data without a copy, use x.data.","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> x.data\n2×2 Matrix{Tuple{Int64, Symbol}}:\n (1, :A) (1, :B)\n (2, :A) (2, 
:B)","category":"page"},{"location":"manual/containers/#Tables-2","page":"Containers","title":"Tables","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Use Containers.rowtable to convert the DenseAxisArray into a Tables.jl compatible Vector{<:NamedTuple}:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> table = Containers.rowtable(x; header = [:I, :J, :value])\n4-element Vector{@NamedTuple{I::Int64, J::Symbol, value::Tuple{Int64, Symbol}}}:\n (I = 1, J = :A, value = (1, :A))\n (I = 2, J = :A, value = (2, :A))\n (I = 1, J = :B, value = (1, :B))\n (I = 2, J = :B, value = (2, :B))","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Because it supports the Tables.jl interface, you can pass it to any function which accepts a table as input:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> import DataFrames;\n\njulia> DataFrames.DataFrame(table)\n4×3 DataFrame\n Row │ I J value\n │ Int64 Symbol Tuple…\n─────┼────────────────────────\n 1 │ 1 A (1, :A)\n 2 │ 2 A (2, :A)\n 3 │ 1 B (1, :B)\n 4 │ 2 B (2, :B)","category":"page"},{"location":"manual/containers/#Keyword-indexing","page":"Containers","title":"Keyword indexing","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"If all axes are named, you can use keyword indexing:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> x[i = 2, j = :A]\n(2, :A)\n\njulia> x[i = :, j = :B]\n1-dimensional DenseAxisArray{Tuple{Int64, Symbol},1,...} with index sets:\n Dimension 1, Base.OneTo(2)\nAnd data, a 2-element Vector{Tuple{Int64, Symbol}}:\n (1, :B)\n (2, :B)","category":"page"},{"location":"manual/containers/#SparseAxisArray","page":"Containers","title":"SparseAxisArray","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"A Containers.SparseAxisArray is created when the index sets are non-rectangular. 
This occurs in two circumstances:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"An index depends on a prior index:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> Containers.@container([i = 1:2, j = i:2], (i, j))\nJuMP.Containers.SparseAxisArray{Tuple{Int64, Int64}, 2, Tuple{Int64, Int64}} with 3 entries:\n [1, 1] = (1, 1)\n [1, 2] = (1, 2)\n [2, 2] = (2, 2)","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"The [indices; condition] syntax is used:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> x = Containers.@container([i = 1:3, j = [:A, :B]; i > 1], (i, j))\nJuMP.Containers.SparseAxisArray{Tuple{Int64, Symbol}, 2, Tuple{Int64, Symbol}} with 4 entries:\n [2, A] = (2, :A)\n [2, B] = (2, :B)\n [3, A] = (3, :A)\n [3, B] = (3, :B)","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Here we have the index sets i = 1:3, j = [:A, :B], followed by ;, and then a condition, which evaluates to true or false: i > 1.","category":"page"},{"location":"manual/containers/#Slicing-3","page":"Containers","title":"Slicing","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Slicing is supported:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> y = x[:, :B]\nJuMP.Containers.SparseAxisArray{Tuple{Int64, Symbol}, 1, Tuple{Int64}} with 2 entries:\n [2] = (2, :B)\n [3] = (3, :B)","category":"page"},{"location":"manual/containers/#Looping-3","page":"Containers","title":"Looping","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Use eachindex to loop over the elements:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> for key in eachindex(x)\n println(x[key])\n end\n(2, :A)\n(2, :B)\n(3, :A)\n(3, :B)\n\njulia> for key in eachindex(y)\n println(y[key])\n end\n(2, :B)\n(3, :B)","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"warning: Warning\nIf you use a macro to construct a SparseAxisArray, then the iteration order is row-major, that is, indices are varied from right to left. As an example, when iterating over x above, the j index is iterated, keeping i constant. 
This order is in contrast to Base.Arrays, which iterate in column-major order, that is, by varying indices from left to right.","category":"page"},{"location":"manual/containers/#Broadcasting-3","page":"Containers","title":"Broadcasting","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Broadcasting over a SparseAxisArray returns a SparseAxisArray","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> swap(x::Tuple) = (last(x), first(x))\nswap (generic function with 1 method)\n\njulia> swap.(y)\nJuMP.Containers.SparseAxisArray{Tuple{Symbol, Int64}, 1, Tuple{Int64}} with 2 entries:\n [2] = (:B, 2)\n [3] = (:B, 3)","category":"page"},{"location":"manual/containers/#Tables-3","page":"Containers","title":"Tables","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Use Containers.rowtable to convert the SparseAxisArray into a Tables.jl compatible Vector{<:NamedTuple}:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> table = Containers.rowtable(x; header = [:I, :J, :value])\n4-element Vector{@NamedTuple{I::Int64, J::Symbol, value::Tuple{Int64, Symbol}}}:\n (I = 2, J = :A, value = (2, :A))\n (I = 2, J = :B, value = (2, :B))\n (I = 3, J = :A, value = (3, :A))\n (I = 3, J = :B, value = (3, :B))","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Because it supports the Tables.jl interface, you can pass it to any function which accepts a table as input:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> import DataFrames;\n\njulia> DataFrames.DataFrame(table)\n4×3 DataFrame\n Row │ I J value\n │ Int64 Symbol Tuple…\n─────┼────────────────────────\n 1 │ 2 A (2, :A)\n 2 │ 2 B (2, :B)\n 3 │ 3 A (3, :A)\n 4 │ 3 B (3, :B)","category":"page"},{"location":"manual/containers/#Keyword-indexing-2","page":"Containers","title":"Keyword indexing","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"If all axes are named, you can use keyword indexing:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> x[i = 2, j = :A]\n(2, :A)\n\njulia> x[i = :, j = :B]\nJuMP.Containers.SparseAxisArray{Tuple{Int64, Symbol}, 1, Tuple{Int64}} with 2 entries:\n [2] = (2, :B)\n [3] = (3, :B)","category":"page"},{"location":"manual/containers/#Forcing-the-container-type","page":"Containers","title":"Forcing the container type","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Pass container = T to use T as the container. 
For example:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> Containers.@container([i = 1:2, j = 1:2], i + j, container = Array)\n2×2 Matrix{Int64}:\n 2 3\n 3 4\n\njulia> Containers.@container([i = 1:2, j = 1:2], i + j, container = Dict)\nDict{Tuple{Int64, Int64}, Int64} with 4 entries:\n (1, 2) => 3\n (1, 1) => 2\n (2, 2) => 4\n (2, 1) => 3","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"You can also pass DenseAxisArray or SparseAxisArray.","category":"page"},{"location":"manual/containers/#How-different-container-types-are-chosen","page":"Containers","title":"How different container types are chosen","text":"","category":"section"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"If the compiler can prove at compile time that the index sets are rectangular, and indexed by a compact set of integers that start at 1, Containers.@container will return an array. This is the case if your index sets are visible to the macro as 1:n:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> Containers.@container([i=1:3, j=1:5], i + j)\n3×5 Matrix{Int64}:\n 2 3 4 5 6\n 3 4 5 6 7\n 4 5 6 7 8","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"or an instance of Base.OneTo:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> set = Base.OneTo(3)\nBase.OneTo(3)\n\njulia> Containers.@container([i=set, j=1:5], i + j)\n3×5 Matrix{Int64}:\n 2 3 4 5 6\n 3 4 5 6 7\n 4 5 6 7 8","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"If the compiler can prove that the index set is rectangular, but not necessarily of the form 1:n at compile time, then a Containers.DenseAxisArray will be constructed instead:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> set = 1:3\n1:3\n\njulia> Containers.@container([i=set, j=1:5], i + j)\n2-dimensional DenseAxisArray{Int64,2,...} with index sets:\n Dimension 1, 1:3\n Dimension 2, Base.OneTo(5)\nAnd data, a 3×5 Matrix{Int64}:\n 2 3 4 5 6\n 3 4 5 6 7\n 4 5 6 7 8","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"info: Info\nWhat happened here? Although we know that set contains 1:3, at compile time the typeof(set) is a UnitRange{Int}. Therefore, Julia can't prove that the range starts at 1 (it only finds this out at runtime), and it defaults to a DenseAxisArray. 
The case where we explicitly wrote i = 1:3 worked because the macro can \"see\" the 1 at compile time.","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"However, if you know that the indices do form an Array, you can force the container type with container = Array:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> set = 1:3\n1:3\n\njulia> Containers.@container([i=set, j=1:5], i + j, container = Array)\n3×5 Matrix{Int64}:\n 2 3 4 5 6\n 3 4 5 6 7\n 4 5 6 7 8","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Here's another example with something similar:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> a = 1\n1\n\njulia> Containers.@container([i=a:3, j=1:5], i + j)\n2-dimensional DenseAxisArray{Int64,2,...} with index sets:\n Dimension 1, 1:3\n Dimension 2, Base.OneTo(5)\nAnd data, a 3×5 Matrix{Int64}:\n 2 3 4 5 6\n 3 4 5 6 7\n 4 5 6 7 8\n\njulia> Containers.@container([i=1:a, j=1:5], i + j)\n1×5 Matrix{Int64}:\n 2 3 4 5 6","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"Finally, if the compiler cannot prove that the index set is rectangular, a Containers.SparseAxisArray will be created.","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"This occurs when some indices depend on a previous one:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> Containers.@container([i=1:3, j=1:i], i + j)\nJuMP.Containers.SparseAxisArray{Int64, 2, Tuple{Int64, Int64}} with 6 entries:\n [1, 1] = 2\n [2, 1] = 3\n [2, 2] = 4\n [3, 1] = 4\n [3, 2] = 5\n [3, 3] = 6","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"or if there is a condition on the index sets:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> Containers.@container([i = 1:5; isodd(i)], i^2)\nJuMP.Containers.SparseAxisArray{Int64, 1, Tuple{Int64}} with 3 entries:\n [1] = 1\n [3] = 9\n [5] = 25","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"The condition can depend on multiple indices, the only requirement is that it is an expression that returns true or false:","category":"page"},{"location":"manual/containers/","page":"Containers","title":"Containers","text":"julia> condition(i, j) = isodd(i) && iseven(j)\ncondition (generic function with 1 method)\n\njulia> Containers.@container([i = 1:2, j = 1:4; condition(i, j)], i + j)\nJuMP.Containers.SparseAxisArray{Int64, 2, Tuple{Int64, Int64}} with 2 entries:\n [1, 2] = 3\n [1, 4] = 5","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"EditURL = \"https://github.com/jump-dev/MathOptInterface.jl/blob/v1.34.0/docs/src/manual/solutions.md\"","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"CurrentModule = MathOptInterface\nDocTestSetup = quote\n import MathOptInterface as MOI\nend\nDocTestFilters = 
[r\"MathOptInterface|MOI\"]","category":"page"},{"location":"moi/manual/solutions/#manual_solutions","page":"Solutions","title":"Solutions","text":"","category":"section"},{"location":"moi/manual/solutions/#Solving-and-retrieving-the-results","page":"Solutions","title":"Solving and retrieving the results","text":"","category":"section"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"Once an optimizer is loaded with the objective function and all of the constraints, we can ask the solver to solve the model by calling optimize!.","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"MOI.optimize!(optimizer)","category":"page"},{"location":"moi/manual/solutions/#Why-did-the-solver-stop?","page":"Solutions","title":"Why did the solver stop?","text":"","category":"section"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"The optimization procedure may stop for a number of reasons. The TerminationStatus attribute of the optimizer returns a TerminationStatusCode object which explains why the solver stopped.","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"The termination statuses distinguish between proofs of optimality, infeasibility, local convergence, limits, and termination because of something unexpected like invalid problem data or failure to converge.","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"A typical usage of the TerminationStatus attribute is as follows:","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"status = MOI.get(optimizer, TerminationStatus())\nif status == MOI.OPTIMAL\n # Ok, we solved the problem!\nelse\n # Handle other cases.\nend","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"After checking the TerminationStatus, check ResultCount. This attribute returns the number of results that the solver has available to return. A result is defined as a primal-dual pair, but either the primal or the dual may be missing from the result. While the OPTIMAL termination status normally implies that at least one result is available, other statuses do not. For example, in the case of infeasibility, a solver may return no result or a proof of infeasibility. 
The ResultCount attribute distinguishes between these two cases.","category":"page"},{"location":"moi/manual/solutions/#Primal-solutions","page":"Solutions","title":"Primal solutions","text":"","category":"section"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"Use the PrimalStatus optimizer attribute to return a ResultStatusCode describing the status of the primal solution.","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"Common returns are described below in the Common status situations section.","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"Query the primal solution using the VariablePrimal and ConstraintPrimal attributes.","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"Query the objective function value using the ObjectiveValue attribute.","category":"page"},{"location":"moi/manual/solutions/#Dual-solutions","page":"Solutions","title":"Dual solutions","text":"","category":"section"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"warning: Warning\nSee Duality for a discussion of the MOI conventions for primal-dual pairs and certificates.","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"Use the DualStatus optimizer attribute to return a ResultStatusCode describing the status of the dual solution.","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"Query the dual solution using the ConstraintDual attribute.","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"Query the dual objective function value using the DualObjectiveValue attribute.","category":"page"},{"location":"moi/manual/solutions/#Common-status-situations","page":"Solutions","title":"Common status situations","text":"","category":"section"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"The sections below describe how to interpret typical or interesting status cases for three common classes of solvers. The example cases are illustrative, not comprehensive. Solver wrappers may provide additional information on how the solver's statuses map to MOI statuses.","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"info: Info\n* in the tables indicate that multiple different values are possible.","category":"page"},{"location":"moi/manual/solutions/#Primal-dual-convex-solver","page":"Solutions","title":"Primal-dual convex solver","text":"","category":"section"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"Linear programming and conic optimization solvers fall into this category.","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"What happened? 
TerminationStatus ResultCount PrimalStatus DualStatus\nProved optimality OPTIMAL 1 FEASIBLE_POINT FEASIBLE_POINT\nProved infeasible INFEASIBLE 1 NO_SOLUTION INFEASIBILITY_CERTIFICATE\nOptimal within relaxed tolerances ALMOST_OPTIMAL 1 FEASIBLE_POINT FEASIBLE_POINT\nOptimal within relaxed tolerances ALMOST_OPTIMAL 1 ALMOST_FEASIBLE_POINT ALMOST_FEASIBLE_POINT\nDetected an unbounded ray of the primal DUAL_INFEASIBLE 1 INFEASIBILITY_CERTIFICATE NO_SOLUTION\nStall SLOW_PROGRESS 1 * *","category":"page"},{"location":"moi/manual/solutions/#Global-branch-and-bound-solvers","page":"Solutions","title":"Global branch-and-bound solvers","text":"","category":"section"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"Mixed-integer programming solvers fall into this category.","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"What happened? TerminationStatus ResultCount PrimalStatus DualStatus\nProved optimality OPTIMAL 1 FEASIBLE_POINT NO_SOLUTION\nPresolve detected infeasibility or unboundedness INFEASIBLE_OR_UNBOUNDED 0 NO_SOLUTION NO_SOLUTION\nProved infeasibility INFEASIBLE 0 NO_SOLUTION NO_SOLUTION\nTimed out (no solution) TIME_LIMIT 0 NO_SOLUTION NO_SOLUTION\nTimed out (with a solution) TIME_LIMIT 1 FEASIBLE_POINT NO_SOLUTION\nCPXMIP_OPTIMAL_INFEAS ALMOST_OPTIMAL 1 INFEASIBLE_POINT NO_SOLUTION","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"info: Info\nCPXMIP_OPTIMAL_INFEAS is a CPLEX status that indicates that a preprocessed problem was solved to optimality, but the solver was unable to recover a feasible solution to the original problem. Handling this status was one of the motivating drivers behind the design of MOI.","category":"page"},{"location":"moi/manual/solutions/#Local-search-solvers","page":"Solutions","title":"Local search solvers","text":"","category":"section"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"Nonlinear programming solvers fall into this category. It also includes non-global tree search solvers like Juniper.","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"What happened? TerminationStatus ResultCount PrimalStatus DualStatus\nConverged to a stationary point LOCALLY_SOLVED 1 FEASIBLE_POINT FEASIBLE_POINT\nCompleted a non-global tree search (with a solution) LOCALLY_SOLVED 1 FEASIBLE_POINT FEASIBLE_POINT\nConverged to an infeasible point LOCALLY_INFEASIBLE 1 INFEASIBLE_POINT *\nCompleted a non-global tree search (no solution found) LOCALLY_INFEASIBLE 0 NO_SOLUTION NO_SOLUTION\nIteration limit ITERATION_LIMIT 1 * *\nDiverging iterates NORM_LIMIT or OBJECTIVE_LIMIT 1 * *","category":"page"},{"location":"moi/manual/solutions/#Querying-solution-attributes","page":"Solutions","title":"Querying solution attributes","text":"","category":"section"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"Some solvers will not implement every solution attribute. 
Therefore, a call like MOI.get(model, MOI.SolveTimeSec()) may throw an UnsupportedAttribute error.","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"If you need to write code that is agnostic to the solver (for example, you are writing a library that an end-user passes their choice of solver to), you can work-around this problem using a try-catch:","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"function get_solve_time(model)\n try\n return MOI.get(model, MOI.SolveTimeSec())\n catch err\n if err isa MOI.UnsupportedAttribute\n return NaN # Solver doesn't support. Return a placeholder value.\n end\n rethrow(err) # Something else went wrong. Rethrow the error\n end\nend","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"If, after careful profiling, you find that the try-catch is taking a significant portion of your runtime, you can improve performance by caching the result of the try-catch:","category":"page"},{"location":"moi/manual/solutions/","page":"Solutions","title":"Solutions","text":"mutable struct CachedSolveTime{M}\n model::M\n supports_solve_time::Bool\n CachedSolveTime(model::M) where {M} = new(model, true)\nend\n\nfunction get_solve_time(model::CachedSolveTime)\n if !model.supports_solve_time\n return NaN\n end\n try\n return MOI.get(model, MOI.SolveTimeSec())\n catch err\n if err isa MOI.UnsupportedAttribute\n model.supports_solve_time = false\n return NaN\n end\n rethrow(err) # Something else went wrong. Rethrow the error\n end\nend","category":"page"},{"location":"tutorials/getting_started/introduction/#Introduction","page":"Introduction","title":"Introduction","text":"","category":"section"},{"location":"tutorials/getting_started/introduction/","page":"Introduction","title":"Introduction","text":"The purpose of these \"Getting started\" tutorials is to teach new users the basics of Julia and JuMP.","category":"page"},{"location":"tutorials/getting_started/introduction/#How-these-tutorials-are-structured","page":"Introduction","title":"How these tutorials are structured","text":"","category":"section"},{"location":"tutorials/getting_started/introduction/","page":"Introduction","title":"Introduction","text":"Having a high-level overview of how this part of the documentation is structured will help you know where to look for certain things.","category":"page"},{"location":"tutorials/getting_started/introduction/","page":"Introduction","title":"Introduction","text":"The \"Getting started with\" tutorials are basic introductions to different aspects of JuMP and Julia. If you are new to JuMP and Julia, start by reading them in the following order:\nGetting started with Julia\nGetting started with JuMP\nGetting started with sets and indexing\nGetting started with data and plotting\nJulia has a reputation for being \"fast.\" Unfortunately, it is also easy to write slow Julia code. Performance tips contains a number of important tips on how to improve the performance of models you write in JuMP.\nDesign patterns for larger models is a more advanced tutorial that is aimed at users writing large JuMP models. It's in the \"Getting started\" section to give you an early preview of how JuMP makes it easy to structure larger models. 
If you are new to JuMP you may want to skip or briefly skim this tutorial, and come back to it once you have written a few JuMP models.","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"EditURL = \"two_stage_stochastic.jl\"","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/#Two-stage-stochastic-programs","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"","category":"section"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"The purpose of this tutorial is to demonstrate how to model and solve a two-stage stochastic program.","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"info: Info\nThe JuMP extension InfiniteOpt.jl can also be used to model and solve two-stage stochastic programs. The JuMP extension SDDP.jl can be used to model and solve multi-stage stochastic programs.","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"This tutorial uses the following packages","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"using JuMP\nimport Distributions\nimport HiGHS\nimport Plots\nimport StatsPlots\nimport Statistics","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/#Background","page":"Two-stage stochastic programs","title":"Background","text":"","category":"section"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"During the week, you are a busy practitioner of Operations Research. To escape the drudgery of mathematics, you decide to open a side business selling creamy mushroom pies with puff pastry. After a few weeks, it quickly becomes apparent that operating a food business is not so easy.","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"The pies must be prepared in the morning, before you open for the day and can gauge the level of demand. If you bake too many, the unsold pies at the end of the day must be discarded and you have wasted time and money on their production. 
But if you bake too few, then there may be un-served customers and you could have made more money by baking more pies.","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"After a few weeks of poor decision making, you decide to put your knowledge of Operations Research to good use, starting with some data collection.","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"Each pie costs you $2 to make, and you sell them at $5 each. Disposal of an unsold pie costs $0.10. Based on three weeks of data collected, in which you made 200 pies each week, you sold 150, 190, and 200 pies. Thus, as a guess, you assume a triangular distribution of demand with a minimum of 150, a median of 200, and a maximum of 250.","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"We can model this problem by a two-stage stochastic program. In the first stage, we decide a quantity of pies to make x. We make this decision before we observe the demand d_omega. In the second stage, we sell y_omega pies, and incur any costs for unsold pies.","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"We can formulate this problem as follows:","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"beginaligned\nmaxlimits_xy_omega -2x + mathbbE_omega5y_omega - 01(x - y_omega) \n y_omega le x quad forall omega in Omega \n 0 le y_omega le d_omega quad forall omega in Omega \n x ge 0\nendaligned","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/#Sample-Average-approximation","page":"Two-stage stochastic programs","title":"Sample Average approximation","text":"","category":"section"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"If the distribution of demand is continuous, then our problem has an infinite number of variables and constraints. To form a computationally tractable problem, we instead use a finite set of samples drawn from the distribution. 
This is called sample average approximation (SAA).","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"D = Distributions.TriangularDist(150.0, 250.0, 200.0)\nN = 100\nd = sort!(rand(D, N));\nΩ = 1:N\nP = fill(1 / N, N);\nStatsPlots.histogram(d; bins = 20, label = \"\", xlabel = \"Demand\")","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/#JuMP-model","page":"Two-stage stochastic programs","title":"JuMP model","text":"","category":"section"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"The implementation of our two-stage stochastic program in JuMP is:","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"model = Model(HiGHS.Optimizer)\nset_silent(model)\n@variable(model, x >= 0)\n@variable(model, 0 <= y[ω in Ω] <= d[ω])\n@constraint(model, [ω in Ω], y[ω] <= x)\n@expression(model, z[ω in Ω], 5y[ω] - 0.1 * (x - y[ω]))\n@objective(model, Max, -2x + sum(P[ω] * z[ω] for ω in Ω))\noptimize!(model)\n@assert is_solved_and_feasible(model)\nsolution_summary(model)","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"The optimal number of pies to make is:","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"value(x)","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"The distribution of total profit is:","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"total_profit = [-2 * value(x) + value(z[ω]) for ω in Ω]","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"Let's plot it:","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"\"\"\"\n bin_distribution(x::Vector{Float64}, N::Int)\n\nA helper function that discretizes `x` into bins of width `N`.\n\"\"\"\nbin_distribution(x, N) = N * (floor(minimum(x) / N):ceil(maximum(x) / N))\n\nplot = StatsPlots.histogram(\n total_profit;\n bins = bin_distribution(total_profit, 25),\n label = \"\",\n xlabel = \"Profit [\\$]\",\n ylabel = \"Number of outcomes\",\n)\nμ = Statistics.mean(total_profit)\nPlots.vline!(\n plot,\n [μ];\n label = \"Expected profit (\\$$(round(Int, μ)))\",\n linewidth = 3,\n)\nplot","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/#Risk-measures","page":"Two-stage stochastic programs","title":"Risk measures","text":"","category":"section"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"A risk measure is a function which maps a random variable to a real number. Common risk measures include the mean (expectation), median, mode, and maximum. 
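As a quick illustration (a sketch that is not part of the original tutorial; it assumes the `total_profit` vector and the `Statistics` import from above), each of these risk measures collapses the sampled profit distribution into a single number:

```julia
# Illustrative only: evaluate a few common risk measures on the sampled
# profits computed above. Assumes `total_profit` and `Statistics` exist.
risk_summary = Dict(
    "mean"    => Statistics.mean(total_profit),
    "median"  => Statistics.median(total_profit),
    "minimum" => minimum(total_profit),  # the worst outcome
    "maximum" => maximum(total_profit),  # the best outcome
)
```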
We need a risk measure to convert the distribution of second stage costs into a single number that can be optimized.","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"Our model currently uses the expectation risk measure, but others are possible too. One popular risk measure is the conditional value at risk (CVaR).","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"CVaR has a parameter gamma, and it computes the expectation of the worst gamma fraction of outcomes.","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"If we are maximizing, so that small outcomes are bad, the definition of CVaR is:","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"CVaR_gammaZ = maxlimits_xi xi - frac1gammamathbbE_omegaleft(xi - Z)_+right","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"which can be formulated as the linear program:","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"beginaligned\nCVaR_gammaZ = maxlimits_xi z_omega xi - frac1gammasum P_omega z_omega\n z_omega ge xi - Z_omega quad forall omega \n z_omega ge 0 quad forall omega\nendaligned","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"function CVaR(Z::Vector{Float64}, P::Vector{Float64}; γ::Float64)\n @assert 0 < γ <= 1\n N = length(Z)\n model = Model(HiGHS.Optimizer)\n set_silent(model)\n @variable(model, ξ)\n @variable(model, z[1:N] >= 0)\n @constraint(model, [i in 1:N], z[i] >= ξ - Z[i])\n @objective(model, Max, ξ - 1 / γ * sum(P[i] * z[i] for i in 1:N))\n optimize!(model)\n @assert is_solved_and_feasible(model)\n return objective_value(model)\nend","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"When γ is 1.0, we compute the mean of the profit:","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"cvar_10 = CVaR(total_profit, P; γ = 1.0)","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"Statistics.mean(total_profit)","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"As γ approaches 0.0, we compute the worst-case (minimum) profit:","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"cvar_00 = CVaR(total_profit, P; γ = 0.0001)","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic 
programs","text":"minimum(total_profit)","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"By varying γ between 0 and 1 we can compute some trade-off of these two extremes:","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"cvar_05 = CVaR(total_profit, P; γ = 0.5)","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"Let's plot these outcomes on our distribution:","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"plot = StatsPlots.histogram(\n total_profit;\n bins = bin_distribution(total_profit, 25),\n label = \"\",\n xlabel = \"Profit [\\$]\",\n ylabel = \"Number of outcomes\",\n)\nPlots.vline!(\n plot,\n [cvar_10 cvar_05 cvar_00];\n label = [\"γ = 1.0\" \"γ = 0.5\" \"γ = 0.0\"],\n linewidth = 3,\n)\nplot","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/#Risk-averse-sample-average-approximation","page":"Two-stage stochastic programs","title":"Risk averse sample average approximation","text":"","category":"section"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"Because CVaR can be formulated as a linear program, we can form a risk averse sample average approximation model by combining the two formulations:","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"γ = 0.4\nmodel = Model(HiGHS.Optimizer)\nset_silent(model)\n@variable(model, x >= 0)\n@variable(model, 0 <= y[ω in Ω] <= d[ω])\n@constraint(model, [ω in Ω], y[ω] <= x)\n@expression(model, Z[ω in Ω], 5 * y[ω] - 0.1(x - y[ω]))\n@variable(model, ξ)\n@variable(model, z[ω in Ω] >= 0)\n@constraint(model, [ω in Ω], z[ω] >= ξ - Z[ω])\n@objective(model, Max, -2x + ξ - 1 / γ * sum(P[ω] * z[ω] for ω in Ω))\noptimize!(model)\n@assert is_solved_and_feasible(model)","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"When gamma = 04, the optimal number of pies to bake is:","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"value(x)","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"The distribution of total profit is:","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"risk_averse_total_profit = [value(-2x + Z[ω]) for ω in Ω]\nbins = bin_distribution([total_profit; risk_averse_total_profit], 25)\nplot = StatsPlots.histogram(total_profit; label = \"Expectation\", bins = bins)\nStatsPlots.histogram!(\n plot,\n risk_averse_total_profit;\n label = \"CV@R\",\n bins = bins,\n alpha = 0.5,\n)\nplot","category":"page"},{"location":"tutorials/applications/two_stage_stochastic/#Next-steps","page":"Two-stage stochastic 
programs","title":"Next steps","text":"","category":"section"},{"location":"tutorials/applications/two_stage_stochastic/","page":"Two-stage stochastic programs","title":"Two-stage stochastic programs","text":"Try solving this problem for different numbers of samples and different distributions.\nRefactor the example to avoid hard-coding the costs. What happens to the solution if the cost of disposing unsold pies increases?\nPlot the optimal number of pies to make for different values of the risk aversion parameter gamma. What is the relationship?","category":"page"},{"location":"packages/solvers/#Introduction","page":"Introduction","title":"Introduction","text":"","category":"section"},{"location":"packages/solvers/","page":"Introduction","title":"Introduction","text":"This section of the documentation contains brief documentation for some of the solvers that JuMP supports. The list of solvers is not exhaustive, but instead is intended to help you discover commonly used solvers.","category":"page"},{"location":"packages/solvers/#Affiliation","page":"Introduction","title":"Affiliation","text":"","category":"section"},{"location":"packages/solvers/","page":"Introduction","title":"Introduction","text":"Packages beginning with jump-dev/ are developed and maintained by the JuMP developers. In many cases, these packages wrap external solvers that are not developed by the JuMP developers and, while the Julia packages are all open-source, in some cases the solvers themselves are closed source commercial products.","category":"page"},{"location":"packages/solvers/","page":"Introduction","title":"Introduction","text":"Packages that do not begin with jump-dev/ are developed independently. The developers of these packages requested or consented to the inclusion of their README contents in the JuMP documentation for the benefit of users.","category":"page"},{"location":"packages/solvers/#Adding-new-solvers","page":"Introduction","title":"Adding new solvers","text":"","category":"section"},{"location":"packages/solvers/","page":"Introduction","title":"Introduction","text":"Written a solver? 
Add it to this section of the JuMP documentation by making a pull request to the docs/packages.toml file.","category":"page"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"EditURL = \"https://github.com/lanl-ansi/MathOptAI.jl/blob/v0.1.6/README.md\"","category":"page"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"(Image: )","category":"page"},{"location":"packages/MathOptAI/#MathOptAI.jl","page":"lanl-ansi/MathOptAI.jl","title":"MathOptAI.jl","text":"","category":"section"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"(Image: Build Status) (Image: Code coverage)","category":"page"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"MathOptAI.jl is a JuMP extension for embedding trained AI, machine learning, and statistical learning models into a JuMP optimization model.","category":"page"},{"location":"packages/MathOptAI/#License","page":"lanl-ansi/MathOptAI.jl","title":"License","text":"","category":"section"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"MathOptAI.jl is provided under a BSD-3 license as part of the Optimization and Machine Learning Toolbox project, O4806.","category":"page"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"See LICENSE.md for details.","category":"page"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"Despite the name similarity, this project is not affiliated with OMLT, the Optimization and Machine Learning Toolkit.","category":"page"},{"location":"packages/MathOptAI/#Installation","page":"lanl-ansi/MathOptAI.jl","title":"Installation","text":"","category":"section"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"Install MathOptAI.jl using the Julia package manager:","category":"page"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"import Pkg\nPkg.add(\"MathOptAI\")","category":"page"},{"location":"packages/MathOptAI/#Getting-started","page":"lanl-ansi/MathOptAI.jl","title":"Getting started","text":"","category":"section"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"Here's an example of using MathOptAI to embed a trained neural network from Flux into a JuMP model. The vector of JuMP variables x is fed as input to the neural network. The output y is a vector of JuMP variables that represents the output layer of the neural network. The formulation object stores the additional variables and constraints that were added to model.","category":"page"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"julia> using JuMP, MathOptAI, Flux\n\njulia> predictor = Flux.Chain(\n Flux.Dense(28^2 => 32, Flux.sigmoid),\n Flux.Dense(32 => 10),\n Flux.softmax,\n );\n\njulia> #= Train the Flux model. 
Code not shown for simplicity =#\n\njulia> model = JuMP.Model();\n\njulia> JuMP.@variable(model, 0 <= x[1:28^2] <= 1);\n\njulia> y, formulation = MathOptAI.add_predictor(model, predictor, x);\n\njulia> y\n10-element Vector{VariableRef}:\n moai_SoftMax[1]\n moai_SoftMax[2]\n moai_SoftMax[3]\n moai_SoftMax[4]\n moai_SoftMax[5]\n moai_SoftMax[6]\n moai_SoftMax[7]\n moai_SoftMax[8]\n moai_SoftMax[9]\n moai_SoftMax[10]","category":"page"},{"location":"packages/MathOptAI/#Documentation","page":"lanl-ansi/MathOptAI.jl","title":"Documentation","text":"","category":"section"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"Documentation is available at https://lanl-ansi.github.io/MathOptAI.jl.","category":"page"},{"location":"packages/MathOptAI/#Getting-help","page":"lanl-ansi/MathOptAI.jl","title":"Getting help","text":"","category":"section"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"For help, questions, comments, and suggestions, please open a GitHub issue.","category":"page"},{"location":"packages/MathOptAI/#Inspiration","page":"lanl-ansi/MathOptAI.jl","title":"Inspiration","text":"","category":"section"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"This project is mainly inspired by two existing projects:","category":"page"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"OMLT\ngurobi-machinelearning","category":"page"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"Other works, from which we took less inspiration, include:","category":"page"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"JANOS\nMeLOn\nENTMOOT\nreluMIP\nOptiCL\nPySCIPOpt-ML","category":"page"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"The 2024 paper of López-Flores et al. is an excellent summary of the state of the field at the time that we started development of MathOptAI.","category":"page"},{"location":"packages/MathOptAI/","page":"lanl-ansi/MathOptAI.jl","title":"lanl-ansi/MathOptAI.jl","text":"López-Flores, F.J., Ramírez-Márquez, C., Ponce-Ortega J.M. (2024). Process Systems Engineering Tools for Optimization of Trained Machine Learning Models: Comparative and Perspective. Industrial & Engineering Chemistry Research, 63(32), 13966-13979. DOI: 10.1021/acs.iecr.4c00632","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"EditURL = \"rolling_horizon.jl\"","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/#Rolling-horizon-problems","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"","category":"section"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"This tutorial was generated using Literate.jl. 
Download the source as a .jl file.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"This tutorial was originally contributed by Diego Tejada.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"The purpose of this tutorial is to demonstrate how to use ParametricOptInterface.jl to solve a rolling horizon optimization problem.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"The term \"rolling horizon\" refers to solving a time-dependent model repeatedly, where the planning interval is shifted forward in time during each solution step.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"As a motivating example, this tutorial models the operations of a power system with solar generation and a battery.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/#Required-packages","page":"Rolling horizon problems","title":"Required packages","text":"","category":"section"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"This tutorial uses the following packages","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"using JuMP\nimport CSV\nimport DataFrames\nimport HiGHS\nimport ParametricOptInterface as POI\nimport Plots","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/#The-optimization-model","page":"Rolling horizon problems","title":"The optimization model","text":"","category":"section"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"The model is a simplified model of a power system's operations with battery storage.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"We model the system of a set of time-steps t in 1ldotsT, where each time step is a period of one hour.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"There are five types of decision variables in the model:","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"Renewable production: r_t geq 0\nThermal production: 0 leq p_t leq overlineP\nStorage level: 0 leq s_t leq overlineS\nStorage charging: 0 leq c_t leq overlineC\nStorage discharging: 0 leq d_t leq overlineD","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"For the purpose of this tutorial, there are three parameters of interest:","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"Demand at time t: D_t\nRenewable availability at time t: A_t\nInitial storage: S_0","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"The objective 
function to minimize is the total cost of thermal generation:","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"min sum_t O cdot p_t","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"For the constraints, we must balance power generation and consumption in all time periods:","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"p_t + r_t + d_t = D_t + c_t forall t","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"We need to account for the dynamics of the battery storage:","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"s_t = s_t-1 + eta^c cdot c_t - fracd_teta^d forall t","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"with the boundary condition that s_0 = S_0.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"Finally, the level of renewable energy production is limited by the quantity of potential solar generation A:","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"r_t leq A_t quad forall t","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"Solving this problem with a large number of time steps is computationally challenging. A common practice is to use the rolling horizon idea to solve multiple identical problems of a smaller size. These problems differ only in parameters such as demand, renewable availability, and initial storage. By combining the solution of many smaller problems, we can recover a feasible solution to the full problem. However, because we don't optimize the full set of decisions in a single optimization problem, the recovered solution might be suboptimal.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/#Parameter-definition-and-input-data","page":"Rolling horizon problems","title":"Parameter definition and input data","text":"","category":"section"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"There are two main parameters for a rolling horizon implementation: the optimization window and the move forward.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"Optimization Window: this value defines how many periods (for example, hours) we will optimize each time. 
For this example, we set the default value to 48 hours, meaning we will optimize two days each time.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"optimization_window = 48;\nnothing #hide","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"Move Forward: this value defines how many periods (for example, hours) we will move forward to optimize the next optimization window. For this example, we set the default value in 24 hours, meaning we will move one day ahead each time.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"move_forward = 24;\nnothing #hide","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"Note that the move forward parameter must be lower or equal to the optimization window parameter to work correctly.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"@assert optimization_window >= move_forward","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"Let's explore the input data in file rolling_horizon.csv. We have a total time horizon of a week (that is, 168 hours), an electricity demand, and a solar production profile.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"filename = joinpath(@__DIR__, \"rolling_horizon.csv\")\ntime_series = CSV.read(filename, DataFrames.DataFrame)\ntime_series[1:21:end, :]","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"We define the solar investment (for example, 150 MW) to determine the solar production during the operation optimization step.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"solar_investment = 150;\nnothing #hide","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"We multiply the level of solar investment by the time series of availability to get actual MW generated.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"time_series.solar_MW = solar_investment * time_series.solar_pu;\nnothing #hide","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"In addition, we can determine some basic information about the rolling horizon, such as the number of data points we have:","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"total_time_length = size(time_series, 1)","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"and the number of windows that we are going to optimize given the 
problem's time horizon:","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"(total_time_length + move_forward - optimization_window) / move_forward","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"Finally, we can see a plot representing the first two optimization windows and the move forward parameter to have a better idea of how the rolling horizon works.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"x_series = 1:total_time_length\ny_series = [time_series.demand_MW, time_series.solar_MW]\nplot_1 = Plots.plot(x_series, y_series; label = [\"demand\" \"solar\"])\nplot_2 = Plots.plot(x_series, y_series; label = false)\nwindow = [0, optimization_window]\nPlots.vspan!(plot_1, window; alpha = 0.25, label = false)\nPlots.vspan!(plot_2, move_forward .+ window; alpha = 0.25, label = false)\ntext_1 = Plots.text(\"optimization\\n window 1\", :top, :left, 8)\nPlots.annotate!(plot_1, 18, time_series.solar_MW[12], text_1)\ntext_2 = Plots.text(\"optimization\\n window 2\", :top, :left, 8)\nPlots.annotate!(plot_2, 42, time_series.solar_MW[12], text_2)\nPlots.plot(\n plot_1,\n plot_2;\n layout = (2, 1),\n linewidth = 3,\n xticks = 0:12:total_time_length,\n xlabel = \"Hours\",\n ylabel = \"MW\",\n)","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/#JuMP-model","page":"Rolling horizon problems","title":"JuMP model","text":"","category":"section"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"We have all the information we need to create a JuMP model to solve a single window of our rolling horizon problem.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"As the optimizer, we use POI.Optimizer, which is part of ParametricOptInterface.jl. POI.Optimizer converts the Parameter decision variables into constants in the underlying optimization model, and it efficiently updates the solver in-place when we call set_parameter_value which avoids having to rebuild the problem each time we call optimize!.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"model = Model(() -> POI.Optimizer(HiGHS.Optimizer()))\nset_silent(model)\n@variables(model, begin\n 0 <= r[1:optimization_window]\n 0 <= p[1:optimization_window] <= 150\n 0 <= s[1:optimization_window] <= 40\n 0 <= c[1:optimization_window] <= 10\n 0 <= d[1:optimization_window] <= 10\n # Initialize empty parameters. These values will get updated later\n D[t in 1:optimization_window] in Parameter(0)\n A[t in 1:optimization_window] in Parameter(0)\n S_0 in Parameter(0)\nend)\n@objective(model, Min, 50 * sum(p))\n@constraints(\n model,\n begin\n p .+ r .+ d .== D .+ c\n s[1] == S_0 + 0.9 * c[1] - d[1] / 0.9\n [t in 2:optimization_window], s[t] == s[t-1] + 0.9 * c[t] - d[t] / 0.9\n r .<= A\n end\n)\nmodel","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"After the optimization, we can store the results in vectors. 
It's important to note that despite optimizing for 48 hours (the default value), we only store the values for the \"move forward\" parameter (for example, 24 hours or one day using the default value). This approach ensures that there is a buffer of additional periods or hours beyond the \"move forward\" parameter to prevent the storage from depleting entirely at the end of the specified hours.","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"sol_complete = Dict(\n :r => zeros(total_time_length),\n :p => zeros(total_time_length),\n :c => zeros(total_time_length),\n :d => zeros(total_time_length),\n # The storage level is initialized with an initial value\n :s => zeros(total_time_length + 1),\n)\nsol_windows = Pair{Int,Dict{Symbol,Vector{Float64}}}[]","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"Now we can iterate across the windows of our rolling horizon problem, and at each window, we:","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"update the parameters in the models\nsolve the model for that window\nstore the results for later analysis","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"offsets = 0:move_forward:total_time_length-optimization_window\nfor offset in offsets\n # Step 1: update the parameter values over the optimization_window\n for t in 1:optimization_window\n set_parameter_value(model[:D][t], time_series[offset+t, :demand_MW])\n set_parameter_value(model[:A][t], time_series[offset+t, :solar_MW])\n end\n # Set the starting storage level as the value from the end of the previous\n # solve. The `+1` accounts for the initial storage value in time step \"t=0\"\n set_parameter_value(model[:S_0], sol_complete[:s][offset+1])\n # Step 2: solve the model\n optimize!(model)\n # Step 3: store the results of the move_forward values, except in the last\n # horizon where we store the full `optimization_window`.\n for t in 1:(offset == last(offsets) ? 
optimization_window : move_forward)\n for key in (:r, :p, :c, :d)\n sol_complete[key][offset+t] = value(model[key][t])\n end\n sol_complete[:s][offset+t+1] = value(model[:s][t])\n end\n sol_window = Dict(key => value.(model[key]) for key in (:r, :p, :s, :c, :d))\n push!(sol_windows, offset => sol_window)\nend","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/#Solution","page":"Rolling horizon problems","title":"Solution","text":"","category":"section"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"Here is a function to plot the solution at each of the time-steps to help visualize the rolling horizon scheme:","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"function plot_solution(sol; offset = 0, kwargs...)\n plot = Plots.plot(;\n ylabel = \"MW\",\n xlims = (0, total_time_length),\n xticks = 0:12:total_time_length,\n kwargs...,\n )\n y = hcat(sol[:p], sol[:r], sol[:d])\n x = offset .+ (1:size(y, 1))\n if offset == 0\n Plots.areaplot!(x, y; label = [\"thermal\" \"solar\" \"discharge\"])\n Plots.areaplot!(x, -sol[:c]; label = \"charge\")\n else\n Plots.areaplot!(x, y; label = false)\n Plots.areaplot!(x, -sol[:c]; label = false)\n end\n return plot\nend\n\nPlots.plot(\n [plot_solution(sol; offset) for (offset, sol) in sol_windows]...;\n layout = (length(sol_windows), 1),\n size = (600, 800),\n margin = 3Plots.mm,\n)","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"We can re-use the function to plot the recovered solution of the full problem:","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"plot_solution(sol_complete; offset = 0, xlabel = \"Hour\")","category":"page"},{"location":"tutorials/algorithms/rolling_horizon/#Final-remark","page":"Rolling horizon problems","title":"Final remark","text":"","category":"section"},{"location":"tutorials/algorithms/rolling_horizon/","page":"Rolling horizon problems","title":"Rolling horizon problems","text":"ParametricOptInterface.jl offers an easy way to update the parameters of an optimization problem that will be solved several times, as in the rolling horizon implementation. 
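To make that update pattern concrete in isolation, here is a minimal sketch (not taken from the tutorial; the constraint and numbers are arbitrary) of creating a parameter, solving, updating it with `set_parameter_value`, and re-solving without rebuilding the model:

```julia
# Minimal sketch of the ParametricOptInterface update pattern.
# The model and numbers are arbitrary illustrations.
using JuMP
import HiGHS
import ParametricOptInterface as POI

model = Model(() -> POI.Optimizer(HiGHS.Optimizer()))
set_silent(model)
@variable(model, x >= 0)
@variable(model, p in Parameter(1.0))   # a right-hand side we will update
@constraint(model, x >= p)
@objective(model, Min, 2x)
optimize!(model)
value(x)                     # 1.0
set_parameter_value(p, 3.0)  # update in-place; the model is not rebuilt
optimize!(model)
value(x)                     # 3.0
```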
It has the benefit of avoiding rebuilding the model each time we want to solve it with new information in a new window.","category":"page"},{"location":"moi/reference/models/","page":"Models","title":"Models","text":"EditURL = \"https://github.com/jump-dev/MathOptInterface.jl/blob/v1.34.0/docs/src/reference/models.md\"","category":"page"},{"location":"moi/reference/models/","page":"Models","title":"Models","text":"CurrentModule = MathOptInterface\nDocTestSetup = quote\n import MathOptInterface as MOI\nend\nDocTestFilters = [r\"MathOptInterface|MOI\"]","category":"page"},{"location":"moi/reference/models/#Models","page":"Models","title":"Models","text":"","category":"section"},{"location":"moi/reference/models/#Attribute-interface","page":"Models","title":"Attribute interface","text":"","category":"section"},{"location":"moi/reference/models/","page":"Models","title":"Models","text":"is_set_by_optimize\nis_copyable\nget\nget!\nset\nsupports\nattribute_value_type","category":"page"},{"location":"moi/reference/models/#MathOptInterface.is_set_by_optimize","page":"Models","title":"MathOptInterface.is_set_by_optimize","text":"is_set_by_optimize(::AnyAttribute)\n\nReturn a Bool indicating whether the value of the attribute is set during an optimize! call, that is, the attribute is used to query the result of the optimization.\n\nIf an attribute can be set by the user, define is_copyable instead.\n\nAn attribute cannot be both is_copyable and is_set_by_optimize.\n\nDefault fallback\n\nThis function returns false by default so it should be implemented for attributes that are set by optimize!.\n\nUndefined behavior\n\nQuerying the value of the attribute that is_set_by_optimize before a call to optimize! is undefined and depends on solver-specific behavior.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#MathOptInterface.is_copyable","page":"Models","title":"MathOptInterface.is_copyable","text":"is_copyable(::AnyAttribute)\n\nReturn a Bool indicating whether the value of the attribute may be copied during copy_to using set.\n\nIf an attribute is_copyable, then it cannot be modified by the optimizer, and get must always return the value that was set by the user.\n\nIf an attribute is the result of an optimization, define is_set_by_optimize instead.\n\nAn attribute cannot be both is_set_by_optimize and is_copyable.\n\nDefault fallback\n\nBy default is_copyable(attr) returns !is_set_by_optimize(attr), which is most probably true.\n\nIf an attribute should not be copied, define is_copyable(::MyAttribute) = false.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#MathOptInterface.get","page":"Models","title":"MathOptInterface.get","text":"MOI.get(b::AbstractBridge, ::MOI.NumberOfVariables)::Int64\n\nReturn the number of variables created by the bridge b in the model.\n\nSee also MOI.NumberOfConstraints.\n\nImplementation notes\n\nThere is a default fallback, so you need only implement this if the bridge adds new variables.\n\n\n\n\n\nMOI.get(b::AbstractBridge, ::MOI.ListOfVariableIndices)\n\nReturn the list of variables created by the bridge b.\n\nSee also MOI.ListOfVariableIndices.\n\nImplementation notes\n\nThere is a default fallback, so you need only implement this if the bridge adds new variables.\n\n\n\n\n\nMOI.get(b::AbstractBridge, ::MOI.NumberOfConstraints{F,S})::Int64 where {F,S}\n\nReturn the number of constraints of the type F-in-S created by the bridge b.\n\nSee also MOI.NumberOfConstraints.\n\nImplementation notes\n\nThere is a default fallback, so 
you need only implement this for the constraint types returned by added_constraint_types.\n\n\n\n\n\nMOI.get(b::AbstractBridge, ::MOI.ListOfConstraintIndices{F,S}) where {F,S}\n\nReturn a Vector{ConstraintIndex{F,S}} with indices of all constraints of type F-in-S created by the bridge b.\n\nSee also MOI.ListOfConstraintIndices.\n\nImplementation notes\n\nThere is a default fallback, so you need only implement this for the constraint types returned by added_constraint_types.\n\n\n\n\n\nfunction MOI.get(\n model::MOI.ModelLike,\n attr::MOI.AbstractConstraintAttribute,\n bridge::AbstractBridge,\n)\n\nReturn the value of the attribute attr of the model model for the constraint bridged by bridge.\n\n\n\n\n\nget(model::GenericModel, attr::MathOptInterface.AbstractOptimizerAttribute)\n\nReturn the value of the attribute attr from the model's MOI backend.\n\n\n\n\n\nget(model::GenericModel, attr::MathOptInterface.AbstractModelAttribute)\n\nReturn the value of the attribute attr from the model's MOI backend.\n\n\n\n\n\nget(optimizer::AbstractOptimizer, attr::AbstractOptimizerAttribute)\n\nReturn an attribute attr of the optimizer optimizer.\n\nget(model::ModelLike, attr::AbstractModelAttribute)\n\nReturn an attribute attr of the model model.\n\nget(model::ModelLike, attr::AbstractVariableAttribute, v::VariableIndex)\n\nIf the attribute attr is set for the variable v in the model model, return its value, return nothing otherwise. If the attribute attr is not supported by model then an error should be thrown instead of returning nothing.\n\nget(model::ModelLike, attr::AbstractVariableAttribute, v::Vector{VariableIndex})\n\nReturn a vector of attributes corresponding to each variable in the collection v in the model model.\n\nget(model::ModelLike, attr::AbstractConstraintAttribute, c::ConstraintIndex)\n\nIf the attribute attr is set for the constraint c in the model model, return its value, return nothing otherwise. If the attribute attr is not supported by model then an error should be thrown instead of returning nothing.\n\nget(\n model::ModelLike,\n attr::AbstractConstraintAttribute,\n c::Vector{ConstraintIndex{F,S}},\n) where {F,S}\n\nReturn a vector of attributes corresponding to each constraint in the collection c in the model model.\n\nget(model::ModelLike, ::Type{VariableIndex}, name::String)\n\nIf a variable with name name exists in the model model, return the corresponding index, otherwise return nothing. Errors if two variables have the same name.\n\nget(\n model::ModelLike,\n ::Type{ConstraintIndex{F,S}},\n name::String,\n) where {F,S}\n\nIf an F-in-S constraint with name name exists in the model model, return the corresponding index, otherwise return nothing. Errors if two constraints have the same name.\n\nget(model::ModelLike, ::Type{ConstraintIndex}, name::String)\n\nIf any constraint with name name exists in the model model, return the corresponding index, otherwise return nothing. This version is available for convenience but may incur a performance penalty because it is not type stable. 
Errors if two constraints have the same name.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#MathOptInterface.get!","page":"Models","title":"MathOptInterface.get!","text":"get!(output, model::ModelLike, args...)\n\nAn in-place version of get.\n\nThe signature matches that of get except that the result is placed in the vector output.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#MathOptInterface.set","page":"Models","title":"MathOptInterface.set","text":"function MOI.set(\n model::MOI.ModelLike,\n attr::MOI.AbstractConstraintAttribute,\n bridge::AbstractBridge,\n value,\n)\n\nSet the value of the attribute attr of the model model for the constraint bridged by bridge.\n\n\n\n\n\nset(optimizer::AbstractOptimizer, attr::AbstractOptimizerAttribute, value)\n\nAssign value to the attribute attr of the optimizer optimizer.\n\nset(model::ModelLike, attr::AbstractModelAttribute, value)\n\nAssign value to the attribute attr of the model model.\n\nset(model::ModelLike, attr::AbstractVariableAttribute, v::VariableIndex, value)\n\nAssign value to the attribute attr of variable v in model model.\n\nset(\n model::ModelLike,\n attr::AbstractVariableAttribute,\n v::Vector{VariableIndex},\n vector_of_values,\n)\n\nAssign a value respectively to the attribute attr of each variable in the collection v in model model.\n\nset(\n model::ModelLike,\n attr::AbstractConstraintAttribute,\n c::ConstraintIndex,\n value,\n)\n\nAssign a value to the attribute attr of constraint c in model model.\n\nset(\n model::ModelLike,\n attr::AbstractConstraintAttribute,\n c::Vector{ConstraintIndex{F,S}},\n vector_of_values,\n) where {F,S}\n\nAssign a value respectively to the attribute attr of each constraint in the collection c in model model.\n\nAn UnsupportedAttribute error is thrown if model does not support the attribute attr (see supports) and a SetAttributeNotAllowed error is thrown if it supports the attribute attr but it cannot be set.\n\nset(\n model::ModelLike,\n ::ConstraintSet,\n c::ConstraintIndex{F,S},\n set::S,\n) where {F,S}\n\nChange the set of constraint c to the new set set which should be of the same type as the original set.\n\nset(\n model::ModelLike,\n ::ConstraintFunction,\n c::ConstraintIndex{F,S},\n func::F,\n) where {F,S}\n\nReplace the function in constraint c with func. F must match the original function type used to define the constraint.\n\nnote: Note\nSetting the constraint function is not allowed if F is VariableIndex; a SettingVariableIndexNotAllowed error is thrown instead. This is because, it would require changing the index c since the index of VariableIndex constraints must be the same as the index of the variable.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#MathOptInterface.supports","page":"Models","title":"MathOptInterface.supports","text":"MOI.supports(\n model::MOI.ModelLike,\n attr::MOI.AbstractConstraintAttribute,\n BT::Type{<:AbstractBridge},\n)\n\nReturn a Bool indicating whether BT supports setting attr to model.\n\n\n\n\n\nsupports(model::ModelLike, sub::AbstractSubmittable)::Bool\n\nReturn a Bool indicating whether model supports the submittable sub.\n\nsupports(model::ModelLike, attr::AbstractOptimizerAttribute)::Bool\n\nReturn a Bool indicating whether model supports the optimizer attribute attr. 
That is, it returns false if copy_to(model, src) shows a warning in case attr is in the ListOfOptimizerAttributesSet of src; see copy_to for more details on how unsupported optimizer attributes are handled in copy.\n\nsupports(model::ModelLike, attr::AbstractModelAttribute)::Bool\n\nReturn a Bool indicating whether model supports the model attribute attr. That is, it returns false if copy_to(model, src) cannot be performed in case attr is in the ListOfModelAttributesSet of src.\n\nsupports(\n model::ModelLike,\n attr::AbstractVariableAttribute,\n ::Type{VariableIndex},\n)::Bool\n\nReturn a Bool indicating whether model supports the variable attribute attr. That is, it returns false if copy_to(model, src) cannot be performed in case attr is in the ListOfVariableAttributesSet of src.\n\nsupports(\n model::ModelLike,\n attr::AbstractConstraintAttribute,\n ::Type{ConstraintIndex{F,S}},\n)::Bool where {F,S}\n\nReturn a Bool indicating whether model supports the constraint attribute attr applied to an F-in-S constraint. That is, it returns false if copy_to(model, src) cannot be performed in case attr is in the ListOfConstraintAttributesSet of src.\n\nFor all five methods, if the attribute is only not supported in specific circumstances, it should still return true.\n\nNote that supports is only defined for attributes for which is_copyable returns true as other attributes do not appear in the list of attributes set obtained by ListOfXXXAttributesSet.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#MathOptInterface.attribute_value_type","page":"Models","title":"MathOptInterface.attribute_value_type","text":"attribute_value_type(attr::AnyAttribute)\n\nGiven an attribute attr, return the type of value expected by get, or returned by set.\n\nNotes\n\nOnly implement this if it makes sense to do so. 
If un-implemented, the default is Any.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#Model-interface","page":"Models","title":"Model interface","text":"","category":"section"},{"location":"moi/reference/models/","page":"Models","title":"Models","text":"ModelLike\nis_empty\nempty!\nwrite_to_file\nread_from_file\nsupports_incremental_interface\ncopy_to\nIndexMap","category":"page"},{"location":"moi/reference/models/#MathOptInterface.ModelLike","page":"Models","title":"MathOptInterface.ModelLike","text":"ModelLike\n\nAbstract supertype for objects that implement the \"Model\" interface for defining an optimization problem.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.is_empty","page":"Models","title":"MathOptInterface.is_empty","text":"is_empty(model::ModelLike)\n\nReturns false if the model has any model attribute set or has any variables or constraints.\n\nNote that an empty model can have optimizer attributes set.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#MathOptInterface.empty!","page":"Models","title":"MathOptInterface.empty!","text":"empty!(model::ModelLike)\n\nEmpty the model, that is, remove all variables, constraints and model attributes but not optimizer attributes.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#MathOptInterface.write_to_file","page":"Models","title":"MathOptInterface.write_to_file","text":"write_to_file(model::ModelLike, filename::String)\n\nWrite the current model to the file at filename.\n\nSupported file types depend on the model type.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#MathOptInterface.read_from_file","page":"Models","title":"MathOptInterface.read_from_file","text":"read_from_file(model::ModelLike, filename::String)\n\nRead the file filename into the model model. If model is non-empty, this may throw an error.\n\nSupported file types depend on the model type.\n\nNote\n\nOnce the contents of the file are loaded into the model, users can query the variables via get(model, ListOfVariableIndices()). However, some filetypes, such as LP files, do not maintain an explicit ordering of the variables. 
Therefore, the returned list may be in an arbitrary order.\n\nTo avoid depending on the order of the indices, look up each variable index by name using get(model, VariableIndex, \"name\").\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#MathOptInterface.supports_incremental_interface","page":"Models","title":"MathOptInterface.supports_incremental_interface","text":"supports_incremental_interface(model::ModelLike)\n\nReturn a Bool indicating whether model supports building incrementally via add_variable and add_constraint.\n\nThe main purpose of this function is to determine whether a model can be loaded into model incrementally or whether it should be cached and copied at once instead.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#MathOptInterface.copy_to","page":"Models","title":"MathOptInterface.copy_to","text":"copy_to(dest::ModelLike, src::ModelLike)::IndexMap\n\nCopy the model from src into dest.\n\nThe target dest is emptied, and all previous indices to variables and constraints in dest are invalidated.\n\nReturns an IndexMap object that translates variable and constraint indices from the src model to the corresponding indices in the dest model.\n\nNotes\n\nIf a constraint that in src is not supported by dest, then an UnsupportedConstraint error is thrown.\nIf an AbstractModelAttribute, AbstractVariableAttribute, or AbstractConstraintAttribute is set in src but not supported by dest, then an UnsupportedAttribute error is thrown.\n\nAbstractOptimizerAttributes are not copied to the dest model.\n\nIndexMap\n\nImplementations of copy_to must return an IndexMap. For technical reasons, this type is defined in the Utilities submodule as MOI.Utilities.IndexMap. However, since it is an integral part of the MOI API, we provide MOI.IndexMap as an alias.\n\nExample\n\n# Given empty `ModelLike` objects `src` and `dest`.\n\nx = add_variable(src)\n\nis_valid(src, x) # true\nis_valid(dest, x) # false (`dest` has no variables)\n\nindex_map = copy_to(dest, src)\nis_valid(dest, x) # false (unless index_map[x] == x)\nis_valid(dest, index_map[x]) # true\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#MathOptInterface.IndexMap","page":"Models","title":"MathOptInterface.IndexMap","text":"IndexMap()\n\nThe dictionary-like object returned by copy_to.\n\nIndexMap\n\nImplementations of copy_to must return an IndexMap. For technical reasons, the IndexMap type is defined in the Utilities submodule as MOI.Utilities.IndexMap. 
However, since it is an integral part of the MOI API, we provide this MOI.IndexMap as an alias.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#Model-attributes","page":"Models","title":"Model attributes","text":"","category":"section"},{"location":"moi/reference/models/","page":"Models","title":"Models","text":"AbstractModelAttribute\nName\nObjectiveFunction\nObjectiveFunctionType\nObjectiveSense\nOptimizationSense\nMIN_SENSE\nMAX_SENSE\nFEASIBILITY_SENSE\nNumberOfVariables\nListOfVariableIndices\nListOfConstraintTypesPresent\nNumberOfConstraints\nListOfConstraintIndices\nListOfOptimizerAttributesSet\nListOfModelAttributesSet\nListOfVariableAttributesSet\nListOfVariablesWithAttributeSet\nListOfConstraintAttributesSet\nListOfConstraintsWithAttributeSet\nUserDefinedFunction\nListOfSupportedNonlinearOperators","category":"page"},{"location":"moi/reference/models/#MathOptInterface.AbstractModelAttribute","page":"Models","title":"MathOptInterface.AbstractModelAttribute","text":"AbstractModelAttribute\n\nAbstract supertype for attribute objects that can be used to set or get attributes (properties) of the model.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.Name","page":"Models","title":"MathOptInterface.Name","text":"Name()\n\nA model attribute for the string identifying the model. It has a default value of \"\" if not set`.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ObjectiveFunction","page":"Models","title":"MathOptInterface.ObjectiveFunction","text":"ObjectiveFunction{F<:AbstractScalarFunction}()\n\nA model attribute for the objective function which has a type F<:AbstractScalarFunction.\n\nF should be guaranteed to be equivalent but not necessarily identical to the function type provided by the user.\n\nThrows an InexactError if the objective function cannot be converted to F, for example, the objective function is quadratic and F is ScalarAffineFunction{Float64} or it has non-integer coefficient and F is ScalarAffineFunction{Int}.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ObjectiveFunctionType","page":"Models","title":"MathOptInterface.ObjectiveFunctionType","text":"ObjectiveFunctionType()\n\nA model attribute for the type F of the objective function set using the ObjectiveFunction{F} attribute.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> model = MOI.Utilities.Model{Float64}();\n\njulia> x = MOI.add_variable(model)\nMOI.VariableIndex(1)\n\njulia> MOI.set(model, MOI.ObjectiveFunction{MOI.VariableIndex}(), x)\n\njulia> MOI.get(model, MOI.ObjectiveFunctionType())\nMathOptInterface.VariableIndex\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ObjectiveSense","page":"Models","title":"MathOptInterface.ObjectiveSense","text":"ObjectiveSense()\n\nA model attribute for the objective sense of the objective function, which must be an OptimizationSense: MIN_SENSE, MAX_SENSE, or FEASIBILITY_SENSE. The default is FEASIBILITY_SENSE.\n\nInteraction with ObjectiveFunction\n\nSetting the sense to FEASIBILITY_SENSE unsets the ObjectiveFunction attribute. That is, if you first set ObjectiveFunction and then set ObjectiveSense to be FEASIBILITY_SENSE, no objective function will be passed to the solver.\n\nIn addition, some reformulations of ObjectiveFunction via bridges rely on the value of ObjectiveSense. 
Therefore, you should set ObjectiveSense before setting ObjectiveFunction.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.OptimizationSense","page":"Models","title":"MathOptInterface.OptimizationSense","text":"OptimizationSense\n\nAn enum for the value of the ObjectiveSense attribute.\n\nValues\n\nPossible values are:\n\nMIN_SENSE: the goal is to minimize the objective function\nMAX_SENSE: the goal is to maximize the objective function\nFEASIBILITY_SENSE: the model does not have an objective function\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.MIN_SENSE","page":"Models","title":"MathOptInterface.MIN_SENSE","text":"MIN_SENSE::OptimizationSense\n\nAn instance of the OptimizationSense enum.\n\nMIN_SENSE: the goal is to minimize the objective function\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.MAX_SENSE","page":"Models","title":"MathOptInterface.MAX_SENSE","text":"MAX_SENSE::OptimizationSense\n\nAn instance of the OptimizationSense enum.\n\nMAX_SENSE: the goal is to maximize the objective function\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.FEASIBILITY_SENSE","page":"Models","title":"MathOptInterface.FEASIBILITY_SENSE","text":"FEASIBILITY_SENSE::OptimizationSense\n\nAn instance of the OptimizationSense enum.\n\nFEASIBILITY_SENSE: the model does not have an objective function\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.NumberOfVariables","page":"Models","title":"MathOptInterface.NumberOfVariables","text":"NumberOfVariables()\n\nA model attribute for the number of variables in the model.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ListOfVariableIndices","page":"Models","title":"MathOptInterface.ListOfVariableIndices","text":"ListOfVariableIndices()\n\nA model attribute for the Vector{VariableIndex} of all variable indices present in the model (that is, of length equal to the value of NumberOfVariables) in the order in which they were added.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ListOfConstraintTypesPresent","page":"Models","title":"MathOptInterface.ListOfConstraintTypesPresent","text":"ListOfConstraintTypesPresent()\n\nA model attribute for the list of tuples of the form (F,S), where F is a function type and S is a set type indicating that the attribute NumberOfConstraints{F,S} has a value greater than zero.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.NumberOfConstraints","page":"Models","title":"MathOptInterface.NumberOfConstraints","text":"NumberOfConstraints{F,S}()\n\nA model attribute for the number of constraints of the type F-in-S present in the model.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ListOfConstraintIndices","page":"Models","title":"MathOptInterface.ListOfConstraintIndices","text":"ListOfConstraintIndices{F,S}()\n\nA model attribute for the Vector{ConstraintIndex{F,S}} of all constraint indices of type F-in-S in the model (that is, of length equal to the value of NumberOfConstraints{F,S}) in the order in which they were added.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ListOfOptimizerAttributesSet","page":"Models","title":"MathOptInterface.ListOfOptimizerAttributesSet","text":"ListOfOptimizerAttributesSet()\n\nAn optimizer attribute for the 
Vector{AbstractOptimizerAttribute} of all optimizer attributes that were set.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ListOfModelAttributesSet","page":"Models","title":"MathOptInterface.ListOfModelAttributesSet","text":"ListOfModelAttributesSet()\n\nA model attribute for the Vector{AbstractModelAttribute} of all model attributes attr such that:\n\nis_copyable(attr) returns true, and\nthe attribute was set to the model\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ListOfVariableAttributesSet","page":"Models","title":"MathOptInterface.ListOfVariableAttributesSet","text":"ListOfVariableAttributesSet()\n\nA model attribute for the Vector{AbstractVariableAttribute} of all variable attributes attr such that 1) is_copyable(attr) returns true and 2) the attribute was set to variables.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ListOfVariablesWithAttributeSet","page":"Models","title":"MathOptInterface.ListOfVariablesWithAttributeSet","text":"ListOfVariablesWithAttributeSet(attr::AbstractVariableAttribute)\n\nA model attribute for the Vector{VariableIndex} of all variables with the attribute attr set.\n\nThe returned list may not be minimal, so some elements may have their default value set.\n\nNote\n\nThis is an optional attribute to implement. The default fallback is to get ListOfVariableIndices.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ListOfConstraintAttributesSet","page":"Models","title":"MathOptInterface.ListOfConstraintAttributesSet","text":"ListOfConstraintAttributesSet{F, S}()\n\nA model attribute for the Vector{AbstractConstraintAttribute} of all constraint attributes attr such that:\n\nis_copyable(attr) returns true and\nthe attribute was set to F-in-S constraints.\n\nNote\n\nThe attributes ConstraintFunction and ConstraintSet should not be included in the list even if they have been set with set.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ListOfConstraintsWithAttributeSet","page":"Models","title":"MathOptInterface.ListOfConstraintsWithAttributeSet","text":"ListOfConstraintsWithAttributeSet{F,S}(attr::AbstractConstraintAttribute)\n\nA model attribute for the Vector{ConstraintIndex{F,S}} of all constraints with the attribute attr set.\n\nThe returned list may not be minimal, so some elements may have their default value set.\n\nNote\n\nThis is an optional attribute to implement. The default fallback is to get ListOfConstraintIndices.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.UserDefinedFunction","page":"Models","title":"MathOptInterface.UserDefinedFunction","text":"UserDefinedFunction(name::Symbol, arity::Int) <: AbstractModelAttribute\n\nSet this attribute to register a user-defined function by the name of name with arity arguments.\n\nOnce registered, name will appear in ListOfSupportedNonlinearOperators.\n\nYou cannot register multiple UserDefinedFunctions with the same name but different arity.\n\nValue type\n\nThe value to be set is a tuple containing one, two, or three functions to evaluate the function, the first-order derivative, and the second-order derivative respectively. 
Both derivatives are optional, but if you pass the second-order derivative you must also pass the first-order derivative.\n\nFor univariate functions with arity == 1, the functions in the tuple must have the form:\n\nf(x::T)::T: returns the value of the function at x\n∇f(x::T)::T: returns the first-order derivative of f with respect to x\n∇²f(x::T)::T: returns the second-order derivative of f with respect to x.\n\nFor multivariate functions with arity > 1, the functions in the tuple must have the form:\n\nf(x::T...)::T: returns the value of the function at x\n∇f(g::AbstractVector{T}, x::T...)::Nothing: fills the components of g, with g[i] being the first-order partial derivative of f with respect to x[i]\n∇²f(H::AbstractMatrix{T}, x::T...)::Nothing: fills the non-zero components of H, with H[i, j] being the second-order partial derivative of f with respect to x[i] and then x[j]. H is initialized to the zero matrix, so you do not need to set any zero elements.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> f(x, y) = x^2 + y^2\nf (generic function with 1 method)\n\njulia> function ∇f(g, x, y)\n g .= 2 * x, 2 * y\n return\n end\n∇f (generic function with 1 method)\n\njulia> function ∇²f(H, x...)\n H[1, 1] = H[2, 2] = 2.0\n return\n end\n∇²f (generic function with 1 method)\n\njulia> model = MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}());\n\njulia> MOI.set(model, MOI.UserDefinedFunction(:f, 2), (f,))\n\njulia> MOI.set(model, MOI.UserDefinedFunction(:g, 2), (f, ∇f))\n\njulia> MOI.set(model, MOI.UserDefinedFunction(:h, 2), (f, ∇f, ∇²f))\n\njulia> x = MOI.add_variables(model, 2)\n2-element Vector{MathOptInterface.VariableIndex}:\n MOI.VariableIndex(1)\n MOI.VariableIndex(2)\n\njulia> MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)\n\njulia> obj_f = MOI.ScalarNonlinearFunction(:f, Any[x[1], x[2]])\nf(MOI.VariableIndex(1), MOI.VariableIndex(2))\n\njulia> MOI.set(model, MOI.ObjectiveFunction{typeof(obj_f)}(), obj_f)\n\njulia> print(model)\nMinimize ScalarNonlinearFunction:\n f(v[1], v[2])\n\nSubject to:\n\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ListOfSupportedNonlinearOperators","page":"Models","title":"MathOptInterface.ListOfSupportedNonlinearOperators","text":"ListOfSupportedNonlinearOperators() <: AbstractModelAttribute\n\nWhen queried with get, return a Vector{Symbol} listing the operators supported by the model.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#Optimizer-interface","page":"Models","title":"Optimizer interface","text":"","category":"section"},{"location":"moi/reference/models/","page":"Models","title":"Models","text":"AbstractOptimizer\nOptimizerWithAttributes\noptimize!\noptimize!(::ModelLike, ::ModelLike)\ninstantiate\ndefault_cache","category":"page"},{"location":"moi/reference/models/#MathOptInterface.AbstractOptimizer","page":"Models","title":"MathOptInterface.AbstractOptimizer","text":"AbstractOptimizer <: ModelLike\n\nAbstract supertype for objects representing an instance of an optimization problem tied to a particular solver. This is typically a solver's in-memory representation. 
In addition to ModelLike, AbstractOptimizer objects let you solve the model and query the solution.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.OptimizerWithAttributes","page":"Models","title":"MathOptInterface.OptimizerWithAttributes","text":"struct OptimizerWithAttributes\n optimizer_constructor\n params::Vector{Pair{AbstractOptimizerAttribute,<:Any}}\nend\n\nObject grouping an optimizer constructor and a list of optimizer attributes. Instances are created with instantiate.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.optimize!","page":"Models","title":"MathOptInterface.optimize!","text":"optimize!(optimizer::AbstractOptimizer)\n\nOptimize the problem contained in optimizer.\n\nBefore calling optimize!, the problem should first be constructed using the incremental interface (see supports_incremental_interface) or copy_to.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#MathOptInterface.optimize!-Tuple{MathOptInterface.ModelLike, MathOptInterface.ModelLike}","page":"Models","title":"MathOptInterface.optimize!","text":"optimize!(dest::AbstractOptimizer, src::ModelLike)::Tuple{IndexMap,Bool}\n\nA \"one-shot\" call that copies the problem from src into dest and then uses dest to optimize the problem.\n\nReturns a tuple of an IndexMap and a Bool copied.\n\nThe IndexMap object translates variable and constraint indices from the src model to the corresponding indices in the dest optimizer. See copy_to for details.\nIf copied == true, src was copied to dest and then cached, allowing incremental modification if supported by the solver.\nIf copied == false, a cache of the model was not kept in dest. Therefore, only the solution information (attributes for which is_set_by_optimize is true) is available to query.\n\nnote: Note\nThe main purpose of the optimize! method with two arguments is for use in Utilities.CachingOptimizer.\n\nRelationship to the single-argument optimize!\n\nThe default fallback of optimize!(dest::AbstractOptimizer, src::ModelLike) is\n\nfunction optimize!(dest::AbstractOptimizer, src::ModelLike)\n index_map = copy_to(dest, src)\n optimize!(dest)\n return index_map, true\nend\n\nTherefore, subtypes of AbstractOptimizer should either implement this two-argument method, or implement both copy_to(::Optimizer, ::ModelLike) and optimize!(::Optimizer).\n\n\n\n\n\n","category":"method"},{"location":"moi/reference/models/#MathOptInterface.instantiate","page":"Models","title":"MathOptInterface.instantiate","text":"instantiate(\n optimizer_constructor,\n with_cache_type::Union{Nothing,Type} = nothing,\n with_bridge_type::Union{Nothing,Type} = nothing,\n)\n\nCreate an instance of an optimizer by either:\n\ncalling optimizer_constructor.optimizer_constructor() and setting the parameters in optimizer_constructor.params if optimizer_constructor is an OptimizerWithAttributes\ncalling optimizer_constructor() if optimizer_constructor is callable.\n\nwith_cache_type\n\nIf with_cache_type is not nothing, then the optimizer is wrapped in a Utilities.CachingOptimizer to store a cache of the model. 
This is most useful if the optimizer you are constructing does not support the incremental interface (see supports_incremental_interface).\n\nwith_bridge_type\n\nIf with_bridge_type is not nothing, the optimizer is wrapped in a Bridges.full_bridge_optimizer, enabling all the bridges defined in the MOI.Bridges submodule with coefficient type with_bridge_type.\n\nIn addition, if the optimizer created by optimizer_constructor does not support the incremental interface (see supports_incremental_interface), then, irrespective of with_cache_type, the optimizer is wrapped in a Utilities.CachingOptimizer to store a cache of the bridged model.\n\nIf with_cache_type and with_bridge_type are both not nothing, then they must be the same type.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#MathOptInterface.default_cache","page":"Models","title":"MathOptInterface.default_cache","text":"default_cache(optimizer::ModelLike, ::Type{T}) where {T}\n\nReturn a new instance of the default model type to be used as cache for optimizer in a Utilities.CachingOptimizer for holding constraints of coefficient type T. By default, this returns Utilities.UniversalFallback(Utilities.Model{T}()). If copying from an instance of a given model type is faster for optimizer, then a new method returning an instance of this model type should be defined.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#Optimizer-attributes","page":"Models","title":"Optimizer attributes","text":"","category":"section"},{"location":"moi/reference/models/","page":"Models","title":"Models","text":"AbstractOptimizerAttribute\nSolverName\nSolverVersion\nSilent\nTimeLimitSec\nObjectiveLimit\nSolutionLimit\nNodeLimit\nRawOptimizerAttribute\nNumberOfThreads\nRawSolver\nAbsoluteGapTolerance\nRelativeGapTolerance\nAutomaticDifferentiationBackend","category":"page"},{"location":"moi/reference/models/#MathOptInterface.AbstractOptimizerAttribute","page":"Models","title":"MathOptInterface.AbstractOptimizerAttribute","text":"AbstractOptimizerAttribute\n\nAbstract supertype for attribute objects that can be used to set or get attributes (properties) of the optimizer.\n\nNotes\n\nThe difference between AbstractOptimizerAttribute and AbstractModelAttribute lies in the behavior of is_empty, empty! and copy_to. Typically optimizer attributes affect only how the model is solved.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.SolverName","page":"Models","title":"MathOptInterface.SolverName","text":"SolverName()\n\nAn optimizer attribute for the string identifying the solver/optimizer.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.SolverVersion","page":"Models","title":"MathOptInterface.SolverVersion","text":"SolverVersion()\n\nAn optimizer attribute for the string identifying the version of the solver.\n\nnote: Note\nFor solvers supporting semantic versioning, the SolverVersion should be a string of the form \"vMAJOR.MINOR.PATCH\", so that it can be converted to a Julia VersionNumber (for example, VersionNumber(\"v1.2.3\")). We do not require Semantic Versioning because some solvers use alternate versioning systems. For example, CPLEX uses Calendar Versioning, so SolverVersion will return a string like \"202001\".\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.Silent","page":"Models","title":"MathOptInterface.Silent","text":"Silent()\n\nAn optimizer attribute for silencing the output of an optimizer. 
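For instance, a hedged sketch (here optimizer is a placeholder for any AbstractOptimizer instance created elsewhere, for example by a solver package):

julia> import MathOptInterface as MOI

julia> MOI.set(optimizer, MOI.Silent(), true)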
When set to true, it takes precedence over any other attribute controlling verbosity and requires the solver to produce no output. The default value is false which has no effect. In this case the verbosity is controlled by other attributes.\n\nNote\n\nEvery optimizer should have verbosity on by default. For instance, if a solver has a solver-specific log level attribute, the MOI implementation should set it to 1 by default. If the user sets Silent to true, then the log level should be set to 0, even if the user specifically sets a value of log level. If the value of Silent is false then the log level set to the solver is the value given by the user for this solver-specific parameter or 1 if none is given.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.TimeLimitSec","page":"Models","title":"MathOptInterface.TimeLimitSec","text":"TimeLimitSec()\n\nAn optimizer attribute for setting a time limit (in seconds) for an optimization. When set to nothing, it deactivates the solver time limit. The default value is nothing.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ObjectiveLimit","page":"Models","title":"MathOptInterface.ObjectiveLimit","text":"ObjectiveLimit()\n\nAn optimizer attribute for setting a limit on the objective value.\n\nThe provided limit must be a Union{Real,Nothing}.\n\nWhen set to nothing, the limit reverts to the solver's default.\n\nThe default value is nothing.\n\nThe solver may stop when the ObjectiveValue is better (lower for minimization, higher for maximization) than the ObjectiveLimit. If stopped, the TerminationStatus should be OBJECTIVE_LIMIT.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.SolutionLimit","page":"Models","title":"MathOptInterface.SolutionLimit","text":"SolutionLimit()\n\nAn optimizer attribute for setting a limit on the number of available feasible solutions.\n\nDefault values\n\nThe provided limit must be a Union{Nothing,Int}.\n\nWhen set to nothing, the limit reverts to the solver's default.\n\nThe default value is nothing.\n\nTermination criteria\n\nThe solver may stop when the ResultCount is larger than or equal to the SolutionLimit. If stopped because of this attribute, the TerminationStatus must be SOLUTION_LIMIT.\n\nSolution quality\n\nThe quality of the available solutions is solver-dependent. The set of resulting solutions is not guaranteed to contain an optimal solution.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.NodeLimit","page":"Models","title":"MathOptInterface.NodeLimit","text":"NodeLimit()\n\nAn optimizer attribute for setting a limit on the number of branch-and-bound nodes explored by a mixed-integer program (MIP) solver.\n\nDefault values\n\nThe provided limit must be a Union{Nothing,Int}.\n\nWhen set to nothing, the limit reverts to the solver's default.\n\nThe default value is nothing.\n\nTermination criteria\n\nThe solver may stop when the NodeCount is larger than or equal to the NodeLimit. 
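For example, a hedged sketch of setting such limits (optimizer again stands for a MIP solver instance created elsewhere):

julia> import MathOptInterface as MOI

julia> MOI.set(optimizer, MOI.NodeLimit(), 1_000)

julia> MOI.set(optimizer, MOI.TimeLimitSec(), 60.0)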
If stopped because of this attribute, the TerminationStatus must be NODE_LIMIT.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.RawOptimizerAttribute","page":"Models","title":"MathOptInterface.RawOptimizerAttribute","text":"RawOptimizerAttribute(name::String)\n\nAn optimizer attribute for the solver-specific parameter identified by name.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.NumberOfThreads","page":"Models","title":"MathOptInterface.NumberOfThreads","text":"NumberOfThreads()\n\nAn optimizer attribute for setting the number of threads used for an optimization. When set to nothing uses solver default. Values are positive integers. The default value is nothing.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.RawSolver","page":"Models","title":"MathOptInterface.RawSolver","text":"RawSolver()\n\nA model attribute for the object that may be used to access a solver-specific API for this optimizer.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.AbsoluteGapTolerance","page":"Models","title":"MathOptInterface.AbsoluteGapTolerance","text":"AbsoluteGapTolerance()\n\nAn optimizer attribute for setting the absolute gap tolerance for an optimization. This is an optimizer attribute, and should be set before calling optimize!. When set to nothing (if supported), uses solver default.\n\nTo set a relative gap tolerance, see RelativeGapTolerance.\n\nwarning: Warning\nThe mathematical definition of \"absolute gap\", and its treatment during the optimization, are solver-dependent. However, assuming no other limit nor issue is encountered during the optimization, most solvers that implement this attribute will stop once |f - b| ≤ g_abs, where b is the best bound, f is the best feasible objective value, and g_abs is the absolute gap.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.RelativeGapTolerance","page":"Models","title":"MathOptInterface.RelativeGapTolerance","text":"RelativeGapTolerance()\n\nAn optimizer attribute for setting the relative gap tolerance for an optimization. This is an optimizer attribute, and should be set before calling optimize!. When set to nothing (if supported), uses solver default.\n\nIf you are looking for the relative gap of the current best solution, see RelativeGap. If no limit nor issue is encountered during the optimization, the value of RelativeGap should be at most as large as RelativeGapTolerance.\n\n# Before optimizing: set relative gap tolerance\n# set 0.1% relative gap tolerance\nMOI.set(model, MOI.RelativeGapTolerance(), 1e-3)\nMOI.optimize!(model)\n\n# After optimizing (assuming all went well)\n# The relative gap tolerance has not changed...\nMOI.get(model, MOI.RelativeGapTolerance()) # returns 1e-3\n# ... and the relative gap of the obtained solution is smaller or equal to the\n# tolerance\nMOI.get(model, MOI.RelativeGap()) # should return something ≤ 1e-3\n\nwarning: Warning\nThe mathematical definition of \"relative gap\", and its allowed range, are solver-dependent. 
Typically, solvers expect a value between 0.0 and 1.0.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.AutomaticDifferentiationBackend","page":"Models","title":"MathOptInterface.AutomaticDifferentiationBackend","text":"AutomaticDifferentiationBackend() <: AbstractOptimizerAttribute\n\nAn AbstractOptimizerAttribute for setting the automatic differentiation backend used by the solver.\n\nThe value must be a subtype of Nonlinear.AbstractAutomaticDifferentiation.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/","page":"Models","title":"Models","text":"List of attributes useful for optimizers","category":"page"},{"location":"moi/reference/models/","page":"Models","title":"Models","text":"TerminationStatus\nTerminationStatusCode\nOPTIMIZE_NOT_CALLED\nOPTIMAL\nINFEASIBLE\nDUAL_INFEASIBLE\nLOCALLY_SOLVED\nLOCALLY_INFEASIBLE\nINFEASIBLE_OR_UNBOUNDED\nALMOST_OPTIMAL\nALMOST_INFEASIBLE\nALMOST_DUAL_INFEASIBLE\nALMOST_LOCALLY_SOLVED\nITERATION_LIMIT\nTIME_LIMIT\nNODE_LIMIT\nSOLUTION_LIMIT\nMEMORY_LIMIT\nOBJECTIVE_LIMIT\nNORM_LIMIT\nOTHER_LIMIT\nSLOW_PROGRESS\nNUMERICAL_ERROR\nINVALID_MODEL\nINVALID_OPTION\nINTERRUPTED\nOTHER_ERROR\nPrimalStatus\nDualStatus\nRawStatusString\nResultCount\nObjectiveValue\nDualObjectiveValue\nObjectiveBound\nRelativeGap\nSolveTimeSec\nSimplexIterations\nBarrierIterations\nNodeCount","category":"page"},{"location":"moi/reference/models/#MathOptInterface.TerminationStatus","page":"Models","title":"MathOptInterface.TerminationStatus","text":"TerminationStatus()\n\nA model attribute for the TerminationStatusCode explaining why the optimizer stopped.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.TerminationStatusCode","page":"Models","title":"MathOptInterface.TerminationStatusCode","text":"TerminationStatusCode\n\nAn Enum of possible values for the TerminationStatus attribute. This attribute is meant to explain the reason why the optimizer stopped executing in the most recent call to optimize!.\n\nValues\n\nPossible values are:\n\nOPTIMIZE_NOT_CALLED: The algorithm has not started.\nOPTIMAL: The algorithm found a globally optimal solution.\nINFEASIBLE: The algorithm concluded that no feasible solution exists.\nDUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem. 
If, additionally, a feasible (primal) solution is known to exist, this status typically implies that the problem is unbounded, with some technical exceptions.\nLOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, could not find directions for improvement, or otherwise completed its search without global guarantees.\nLOCALLY_INFEASIBLE: The algorithm converged to an infeasible point or otherwise completed its search without finding a feasible solution, without guarantees that no feasible solution exists.\nINFEASIBLE_OR_UNBOUNDED: The algorithm stopped because it decided that the problem is infeasible or unbounded; this occasionally happens during MIP presolve.\nALMOST_OPTIMAL: The algorithm found a globally optimal solution to relaxed tolerances.\nALMOST_INFEASIBLE: The algorithm concluded that no feasible solution exists within relaxed tolerances.\nALMOST_DUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem within relaxed tolerances.\nALMOST_LOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, or could not find directions for improvement within relaxed tolerances.\nITERATION_LIMIT: An iterative algorithm stopped after conducting the maximum number of iterations.\nTIME_LIMIT: The algorithm stopped after a user-specified computation time.\nNODE_LIMIT: A branch-and-bound algorithm stopped because it explored a maximum number of nodes in the branch-and-bound tree.\nSOLUTION_LIMIT: The algorithm stopped because it found the required number of solutions. This is often used in MIPs to get the solver to return the first feasible solution it encounters.\nMEMORY_LIMIT: The algorithm stopped because it ran out of memory.\nOBJECTIVE_LIMIT: The algorithm stopped because it found a solution better than a minimum limit set by the user.\nNORM_LIMIT: The algorithm stopped because the norm of an iterate became too large.\nOTHER_LIMIT: The algorithm stopped due to a limit not covered by one of the _LIMIT_ statuses above.\nSLOW_PROGRESS: The algorithm stopped because it was unable to continue making progress towards the solution.\nNUMERICAL_ERROR: The algorithm stopped because it encountered unrecoverable numerical error.\nINVALID_MODEL: The algorithm stopped because the model is invalid.\nINVALID_OPTION: The algorithm stopped because it was provided an invalid option.\nINTERRUPTED: The algorithm stopped because of an interrupt signal.\nOTHER_ERROR: The algorithm stopped because of an error not covered by one of the statuses defined above.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.OPTIMIZE_NOT_CALLED","page":"Models","title":"MathOptInterface.OPTIMIZE_NOT_CALLED","text":"OPTIMIZE_NOT_CALLED::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nOPTIMIZE_NOT_CALLED: The algorithm has not started.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.OPTIMAL","page":"Models","title":"MathOptInterface.OPTIMAL","text":"OPTIMAL::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nOPTIMAL: The algorithm found a globally optimal solution.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.INFEASIBLE","page":"Models","title":"MathOptInterface.INFEASIBLE","text":"INFEASIBLE::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nINFEASIBLE: The algorithm concluded that no feasible solution 
exists.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.DUAL_INFEASIBLE","page":"Models","title":"MathOptInterface.DUAL_INFEASIBLE","text":"DUAL_INFEASIBLE::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nDUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem. If, additionally, a feasible (primal) solution is known to exist, this status typically implies that the problem is unbounded, with some technical exceptions.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.LOCALLY_SOLVED","page":"Models","title":"MathOptInterface.LOCALLY_SOLVED","text":"LOCALLY_SOLVED::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nLOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, could not find directions for improvement, or otherwise completed its search without global guarantees.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.LOCALLY_INFEASIBLE","page":"Models","title":"MathOptInterface.LOCALLY_INFEASIBLE","text":"LOCALLY_INFEASIBLE::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nLOCALLY_INFEASIBLE: The algorithm converged to an infeasible point or otherwise completed its search without finding a feasible solution, without guarantees that no feasible solution exists.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.INFEASIBLE_OR_UNBOUNDED","page":"Models","title":"MathOptInterface.INFEASIBLE_OR_UNBOUNDED","text":"INFEASIBLE_OR_UNBOUNDED::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nINFEASIBLE_OR_UNBOUNDED: The algorithm stopped because it decided that the problem is infeasible or unbounded; this occasionally happens during MIP presolve.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.ALMOST_OPTIMAL","page":"Models","title":"MathOptInterface.ALMOST_OPTIMAL","text":"ALMOST_OPTIMAL::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nALMOST_OPTIMAL: The algorithm found a globally optimal solution to relaxed tolerances.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.ALMOST_INFEASIBLE","page":"Models","title":"MathOptInterface.ALMOST_INFEASIBLE","text":"ALMOST_INFEASIBLE::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nALMOST_INFEASIBLE: The algorithm concluded that no feasible solution exists within relaxed tolerances.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.ALMOST_DUAL_INFEASIBLE","page":"Models","title":"MathOptInterface.ALMOST_DUAL_INFEASIBLE","text":"ALMOST_DUAL_INFEASIBLE::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nALMOST_DUAL_INFEASIBLE: The algorithm concluded that no dual bound exists for the problem within relaxed tolerances.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.ALMOST_LOCALLY_SOLVED","page":"Models","title":"MathOptInterface.ALMOST_LOCALLY_SOLVED","text":"ALMOST_LOCALLY_SOLVED::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nALMOST_LOCALLY_SOLVED: The algorithm converged to a stationary point, local optimal solution, or could not find directions for improvement within relaxed 
tolerances.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.ITERATION_LIMIT","page":"Models","title":"MathOptInterface.ITERATION_LIMIT","text":"ITERATION_LIMIT::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nITERATION_LIMIT: An iterative algorithm stopped after conducting the maximum number of iterations.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.TIME_LIMIT","page":"Models","title":"MathOptInterface.TIME_LIMIT","text":"TIME_LIMIT::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nTIME_LIMIT: The algorithm stopped after a user-specified computation time.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.NODE_LIMIT","page":"Models","title":"MathOptInterface.NODE_LIMIT","text":"NODE_LIMIT::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nNODE_LIMIT: A branch-and-bound algorithm stopped because it explored a maximum number of nodes in the branch-and-bound tree.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.SOLUTION_LIMIT","page":"Models","title":"MathOptInterface.SOLUTION_LIMIT","text":"SOLUTION_LIMIT::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nSOLUTION_LIMIT: The algorithm stopped because it found the required number of solutions. This is often used in MIPs to get the solver to return the first feasible solution it encounters.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.MEMORY_LIMIT","page":"Models","title":"MathOptInterface.MEMORY_LIMIT","text":"MEMORY_LIMIT::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nMEMORY_LIMIT: The algorithm stopped because it ran out of memory.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.OBJECTIVE_LIMIT","page":"Models","title":"MathOptInterface.OBJECTIVE_LIMIT","text":"OBJECTIVE_LIMIT::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nOBJECTIVE_LIMIT: The algorithm stopped because it found a solution better than a minimum limit set by the user.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.NORM_LIMIT","page":"Models","title":"MathOptInterface.NORM_LIMIT","text":"NORM_LIMIT::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nNORM_LIMIT: The algorithm stopped because the norm of an iterate became too large.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.OTHER_LIMIT","page":"Models","title":"MathOptInterface.OTHER_LIMIT","text":"OTHER_LIMIT::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nOTHER_LIMIT: The algorithm stopped due to a limit not covered by one of the _LIMIT_ statuses above.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.SLOW_PROGRESS","page":"Models","title":"MathOptInterface.SLOW_PROGRESS","text":"SLOW_PROGRESS::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nSLOW_PROGRESS: The algorithm stopped because it was unable to continue making progress towards the solution.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.NUMERICAL_ERROR","page":"Models","title":"MathOptInterface.NUMERICAL_ERROR","text":"NUMERICAL_ERROR::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nNUMERICAL_ERROR: The 
algorithm stopped because it encountered unrecoverable numerical error.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.INVALID_MODEL","page":"Models","title":"MathOptInterface.INVALID_MODEL","text":"INVALID_MODEL::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nINVALID_MODEL: The algorithm stopped because the model is invalid.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.INVALID_OPTION","page":"Models","title":"MathOptInterface.INVALID_OPTION","text":"INVALID_OPTION::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nINVALID_OPTION: The algorithm stopped because it was provided an invalid option.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.INTERRUPTED","page":"Models","title":"MathOptInterface.INTERRUPTED","text":"INTERRUPTED::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nINTERRUPTED: The algorithm stopped because of an interrupt signal.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.OTHER_ERROR","page":"Models","title":"MathOptInterface.OTHER_ERROR","text":"OTHER_ERROR::TerminationStatusCode\n\nAn instance of the TerminationStatusCode enum.\n\nOTHER_ERROR: The algorithm stopped because of an error not covered by one of the statuses defined above.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.PrimalStatus","page":"Models","title":"MathOptInterface.PrimalStatus","text":"PrimalStatus(result_index::Int = 1)\n\nA model attribute for the ResultStatusCode of the primal result result_index. If result_index is omitted, it defaults to 1.\n\nSee ResultCount for information on how the results are ordered.\n\nIf result_index is larger than the value of ResultCount then NO_SOLUTION is returned.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.DualStatus","page":"Models","title":"MathOptInterface.DualStatus","text":"DualStatus(result_index::Int = 1)\n\nA model attribute for the ResultStatusCode of the dual result result_index. If result_index is omitted, it defaults to 1.\n\nSee ResultCount for information on how the results are ordered.\n\nIf result_index is larger than the value of ResultCount then NO_SOLUTION is returned.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.RawStatusString","page":"Models","title":"MathOptInterface.RawStatusString","text":"RawStatusString()\n\nA model attribute for a solver specific string explaining why the optimizer stopped.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ResultCount","page":"Models","title":"MathOptInterface.ResultCount","text":"ResultCount()\n\nA model attribute for the number of results available.\n\nOrder of solutions\n\nA number of attributes contain an index, result_index, which is used to refer to one of the available results. Thus, result_index must be an integer between 1 and the number of available results.\n\nAs a general rule, the first result (result_index=1) is the most important result (for example, an optimal solution or an infeasibility certificate). Other results will typically be alternate solutions that the solver found during the search for the first result.\n\nIf a (local) optimal solution is available, that is, TerminationStatus is OPTIMAL or LOCALLY_SOLVED, the first result must correspond to the (locally) optimal solution. 
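A hedged sketch of the usual query loop (assuming model has already been solved by an optimizer; not part of the upstream docstring):

julia> import MathOptInterface as MOI

julia> for i in 1:MOI.get(model, MOI.ResultCount())
           if MOI.get(model, MOI.PrimalStatus(i)) == MOI.FEASIBLE_POINT
               println(i, " => ", MOI.get(model, MOI.ObjectiveValue(i)))
           end
       end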
Other results may be alternative optimal solutions, or they may be other suboptimal solutions; use ObjectiveValue to distinguish between them.\n\nIf a primal or dual infeasibility certificate is available, that is, TerminationStatus is INFEASIBLE or DUAL_INFEASIBLE and the corresponding PrimalStatus or DualStatus is INFEASIBILITY_CERTIFICATE, then the first result must be a certificate. Other results may be alternate certificates, or infeasible points.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ObjectiveValue","page":"Models","title":"MathOptInterface.ObjectiveValue","text":"ObjectiveValue(result_index::Int = 1)\n\nA model attribute for the objective value of the primal solution result_index.\n\nIf the solver does not have a primal value for the objective because the result_index is beyond the available solutions (whose number is indicated by the ResultCount attribute), getting this attribute must throw a ResultIndexBoundsError. Otherwise, if the result is unavailable for another reason (for instance, only a dual solution is available), the result is undefined. Users should first check PrimalStatus before accessing the ObjectiveValue attribute.\n\nSee ResultCount for information on how the results are ordered.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.DualObjectiveValue","page":"Models","title":"MathOptInterface.DualObjectiveValue","text":"DualObjectiveValue(result_index::Int = 1)\n\nA model attribute for the value of the objective function of the dual problem for the result_indexth dual result.\n\nIf the solver does not have a dual value for the objective because the result_index is beyond the available solutions (whose number is indicated by the ResultCount attribute), getting this attribute must throw a ResultIndexBoundsError. Otherwise, if the result is unavailable for another reason (for instance, only a primal solution is available), the result is undefined. Users should first check DualStatus before accessing the DualObjectiveValue attribute.\n\nSee ResultCount for information on how the results are ordered.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ObjectiveBound","page":"Models","title":"MathOptInterface.ObjectiveBound","text":"ObjectiveBound()\n\nA model attribute for the best known bound on the optimal objective value.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.RelativeGap","page":"Models","title":"MathOptInterface.RelativeGap","text":"RelativeGap()\n\nA model attribute for the final relative optimality gap.\n\nwarning: Warning\nThe definition of this gap is solver-dependent. 
However, most solvers implementing this attribute define the relative gap as some variation of |b - f| / |f|, where b is the best bound and f is the best feasible objective value.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.SolveTimeSec","page":"Models","title":"MathOptInterface.SolveTimeSec","text":"SolveTimeSec()\n\nA model attribute for the total elapsed solution time (in seconds) as reported by the optimizer.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.SimplexIterations","page":"Models","title":"MathOptInterface.SimplexIterations","text":"SimplexIterations()\n\nA model attribute for the cumulative number of simplex iterations during the optimization process.\n\nFor a mixed-integer program (MIP), the return value is the total simplex iterations for all nodes.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.BarrierIterations","page":"Models","title":"MathOptInterface.BarrierIterations","text":"BarrierIterations()\n\nA model attribute for the cumulative number of barrier iterations while solving a problem.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.NodeCount","page":"Models","title":"MathOptInterface.NodeCount","text":"NodeCount()\n\nA model attribute for the total number of branch-and-bound nodes explored while solving a mixed-integer program (MIP).\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#ResultStatusCode","page":"Models","title":"ResultStatusCode","text":"","category":"section"},{"location":"moi/reference/models/","page":"Models","title":"Models","text":"ResultStatusCode\nNO_SOLUTION\nFEASIBLE_POINT\nNEARLY_FEASIBLE_POINT\nINFEASIBLE_POINT\nINFEASIBILITY_CERTIFICATE\nNEARLY_INFEASIBILITY_CERTIFICATE\nREDUCTION_CERTIFICATE\nNEARLY_REDUCTION_CERTIFICATE\nUNKNOWN_RESULT_STATUS\nOTHER_RESULT_STATUS","category":"page"},{"location":"moi/reference/models/#MathOptInterface.ResultStatusCode","page":"Models","title":"MathOptInterface.ResultStatusCode","text":"ResultStatusCode\n\nAn Enum of possible values for the PrimalStatus and DualStatus attributes.\n\nThe values indicate how to interpret the result vector.\n\nValues\n\nPossible values are:\n\nNO_SOLUTION: the result vector is empty.\nFEASIBLE_POINT: the result vector is a feasible point.\nNEARLY_FEASIBLE_POINT: the result vector is feasible if some constraint tolerances are relaxed.\nINFEASIBLE_POINT: the result vector is an infeasible point.\nINFEASIBILITY_CERTIFICATE: the result vector is an infeasibility certificate. If the PrimalStatus is INFEASIBILITY_CERTIFICATE, then the primal result vector is a certificate of dual infeasibility. If the DualStatus is INFEASIBILITY_CERTIFICATE, then the dual result vector is a proof of primal infeasibility.\nNEARLY_INFEASIBILITY_CERTIFICATE: the result satisfies a relaxed criterion for a certificate of infeasibility.\nREDUCTION_CERTIFICATE: the result vector is an ill-posed certificate; see this article for details. If the PrimalStatus is REDUCTION_CERTIFICATE, then the primal result vector is a proof that the dual problem is ill-posed. 
If the DualStatus is REDUCTION_CERTIFICATE, then the dual result vector is a proof that the primal is ill-posed.\nNEARLY_REDUCTION_CERTIFICATE: the result satisfies a relaxed criterion for an ill-posed certificate.\nUNKNOWN_RESULT_STATUS: the result vector contains a solution with an unknown interpretation.\nOTHER_RESULT_STATUS: the result vector contains a solution with an interpretation not covered by one of the statuses defined above\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.NO_SOLUTION","page":"Models","title":"MathOptInterface.NO_SOLUTION","text":"NO_SOLUTION::ResultStatusCode\n\nAn instance of the ResultStatusCode enum.\n\nNO_SOLUTION: the result vector is empty.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.FEASIBLE_POINT","page":"Models","title":"MathOptInterface.FEASIBLE_POINT","text":"FEASIBLE_POINT::ResultStatusCode\n\nAn instance of the ResultStatusCode enum.\n\nFEASIBLE_POINT: the result vector is a feasible point.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.NEARLY_FEASIBLE_POINT","page":"Models","title":"MathOptInterface.NEARLY_FEASIBLE_POINT","text":"NEARLY_FEASIBLE_POINT::ResultStatusCode\n\nAn instance of the ResultStatusCode enum.\n\nNEARLY_FEASIBLE_POINT: the result vector is feasible if some constraint tolerances are relaxed.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.INFEASIBLE_POINT","page":"Models","title":"MathOptInterface.INFEASIBLE_POINT","text":"INFEASIBLE_POINT::ResultStatusCode\n\nAn instance of the ResultStatusCode enum.\n\nINFEASIBLE_POINT: the result vector is an infeasible point.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.INFEASIBILITY_CERTIFICATE","page":"Models","title":"MathOptInterface.INFEASIBILITY_CERTIFICATE","text":"INFEASIBILITY_CERTIFICATE::ResultStatusCode\n\nAn instance of the ResultStatusCode enum.\n\nINFEASIBILITY_CERTIFICATE: the result vector is an infeasibility certificate. If the PrimalStatus is INFEASIBILITY_CERTIFICATE, then the primal result vector is a certificate of dual infeasibility. If the DualStatus is INFEASIBILITY_CERTIFICATE, then the dual result vector is a proof of primal infeasibility.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.NEARLY_INFEASIBILITY_CERTIFICATE","page":"Models","title":"MathOptInterface.NEARLY_INFEASIBILITY_CERTIFICATE","text":"NEARLY_INFEASIBILITY_CERTIFICATE::ResultStatusCode\n\nAn instance of the ResultStatusCode enum.\n\nNEARLY_INFEASIBILITY_CERTIFICATE: the result satisfies a relaxed criterion for a certificate of infeasibility.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.REDUCTION_CERTIFICATE","page":"Models","title":"MathOptInterface.REDUCTION_CERTIFICATE","text":"REDUCTION_CERTIFICATE::ResultStatusCode\n\nAn instance of the ResultStatusCode enum.\n\nREDUCTION_CERTIFICATE: the result vector is an ill-posed certificate; see this article for details. If the PrimalStatus is REDUCTION_CERTIFICATE, then the primal result vector is a proof that the dual problem is ill-posed. 
If the DualStatus is REDUCTION_CERTIFICATE, then the dual result vector is a proof that the primal is ill-posed.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.NEARLY_REDUCTION_CERTIFICATE","page":"Models","title":"MathOptInterface.NEARLY_REDUCTION_CERTIFICATE","text":"NEARLY_REDUCTION_CERTIFICATE::ResultStatusCode\n\nAn instance of the ResultStatusCode enum.\n\nNEARLY_REDUCTION_CERTIFICATE: the result satisfies a relaxed criterion for an ill-posed certificate.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.UNKNOWN_RESULT_STATUS","page":"Models","title":"MathOptInterface.UNKNOWN_RESULT_STATUS","text":"UNKNOWN_RESULT_STATUS::ResultStatusCode\n\nAn instance of the ResultStatusCode enum.\n\nUNKNOWN_RESULT_STATUS: the result vector contains a solution with an unknown interpretation.\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.OTHER_RESULT_STATUS","page":"Models","title":"MathOptInterface.OTHER_RESULT_STATUS","text":"OTHER_RESULT_STATUS::ResultStatusCode\n\nAn instance of the ResultStatusCode enum.\n\nOTHER_RESULT_STATUS: the result vector contains a solution with an interpretation not covered by one of the statuses defined above\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#Conflict-Status","page":"Models","title":"Conflict Status","text":"","category":"section"},{"location":"moi/reference/models/","page":"Models","title":"Models","text":"compute_conflict!\nConflictStatus\nConstraintConflictStatus\nConflictStatusCode\nConflictParticipationStatusCode\nNOT_IN_CONFLICT\nIN_CONFLICT\nMAYBE_IN_CONFLICT","category":"page"},{"location":"moi/reference/models/#MathOptInterface.compute_conflict!","page":"Models","title":"MathOptInterface.compute_conflict!","text":"compute_conflict!(optimizer::AbstractOptimizer)\n\nComputes a minimal subset of constraints such that the model with the other constraint removed is still infeasible.\n\nSome solvers call a set of conflicting constraints an Irreducible Inconsistent Subsystem (IIS).\n\nSee also ConflictStatus and ConstraintConflictStatus.\n\nNote\n\nIf the model is modified after a call to compute_conflict!, the implementor is not obliged to purge the conflict. Any calls to the above attributes may return values for the original conflict without a warning. Similarly, when modifying the model, the conflict can be discarded.\n\n\n\n\n\n","category":"function"},{"location":"moi/reference/models/#MathOptInterface.ConflictStatus","page":"Models","title":"MathOptInterface.ConflictStatus","text":"ConflictStatus()\n\nA model attribute for the ConflictStatusCode explaining why the conflict refiner stopped when computing the conflict.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ConstraintConflictStatus","page":"Models","title":"MathOptInterface.ConstraintConflictStatus","text":"ConstraintConflictStatus()\n\nA constraint attribute indicating whether the constraint participates in the conflict. Its type is ConflictParticipationStatusCode.\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ConflictStatusCode","page":"Models","title":"MathOptInterface.ConflictStatusCode","text":"ConflictStatusCode\n\nAn Enum of possible values for the ConflictStatus attribute. 
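A hedged sketch of how these values are typically used together with compute_conflict! (optimizer stands for a solver instance holding an infeasible model, and c for one of its ConstraintIndexes; both are assumptions, not part of the upstream docstring):

julia> import MathOptInterface as MOI

julia> MOI.compute_conflict!(optimizer)

julia> status = MOI.get(optimizer, MOI.ConflictStatus());

julia> if status == MOI.CONFLICT_FOUND
           MOI.get(optimizer, MOI.ConstraintConflictStatus(), c)
       end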
This attribute is meant to explain the reason why the conflict finder stopped executing in the most recent call to compute_conflict!.\n\nPossible values are:\n\nCOMPUTE_CONFLICT_NOT_CALLED: the function compute_conflict! has not yet been called\nNO_CONFLICT_EXISTS: there is no conflict because the problem is feasible\nNO_CONFLICT_FOUND: the solver could not find a conflict\nCONFLICT_FOUND: at least one conflict could be found\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.ConflictParticipationStatusCode","page":"Models","title":"MathOptInterface.ConflictParticipationStatusCode","text":"ConflictParticipationStatusCode\n\nAn Enum of possible values for the ConstraintConflictStatus attribute. This attribute is meant to indicate whether a given constraint participates or not in the last computed conflict.\n\nValues\n\nPossible values are:\n\nNOT_IN_CONFLICT: the constraint does not participate in the conflict\nIN_CONFLICT: the constraint participates in the conflict\nMAYBE_IN_CONFLICT: the constraint may participate in the conflict, the solver was not able to prove that the constraint can be excluded from the conflict\n\n\n\n\n\n","category":"type"},{"location":"moi/reference/models/#MathOptInterface.NOT_IN_CONFLICT","page":"Models","title":"MathOptInterface.NOT_IN_CONFLICT","text":"NOT_IN_CONFLICT::ConflictParticipationStatusCode\n\nAn instance of the ConflictParticipationStatusCode enum.\n\nNOT_IN_CONFLICT: the constraint does not participate in the conflict\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.IN_CONFLICT","page":"Models","title":"MathOptInterface.IN_CONFLICT","text":"IN_CONFLICT::ConflictParticipationStatusCode\n\nAn instance of the ConflictParticipationStatusCode enum.\n\nIN_CONFLICT: the constraint participates in the conflict\n\n\n\n\n\n","category":"constant"},{"location":"moi/reference/models/#MathOptInterface.MAYBE_IN_CONFLICT","page":"Models","title":"MathOptInterface.MAYBE_IN_CONFLICT","text":"MAYBE_IN_CONFLICT::ConflictParticipationStatusCode\n\nAn instance of the ConflictParticipationStatusCode enum.\n\nMAYBE_IN_CONFLICT: the constraint may participate in the conflict, the solver was not able to prove that the constraint can be excluded from the conflict\n\n\n\n\n\n","category":"constant"},{"location":"moi/submodules/Nonlinear/reference/","page":"API Reference","title":"API Reference","text":"EditURL = \"https://github.com/jump-dev/MathOptInterface.jl/blob/v1.34.0/docs/src/submodules/Nonlinear/reference.md\"","category":"page"},{"location":"moi/submodules/Nonlinear/reference/","page":"API Reference","title":"API Reference","text":"CurrentModule = MathOptInterface\nDocTestSetup = quote\n import MathOptInterface as MOI\nend\nDocTestFilters = [r\"MathOptInterface|MOI\"]","category":"page"},{"location":"moi/submodules/Nonlinear/reference/#NonlinearAPI","page":"API Reference","title":"Nonlinear Modeling","text":"","category":"section"},{"location":"moi/submodules/Nonlinear/reference/","page":"API Reference","title":"API Reference","text":"More information can be found in the Nonlinear section of the manual.","category":"page"},{"location":"moi/submodules/Nonlinear/reference/","page":"API Reference","title":"API Reference","text":"Nonlinear\nNonlinear.Model","category":"page"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear","page":"API Reference","title":"MathOptInterface.Nonlinear","text":"Nonlinear\n\nwarning: Warning\nThe Nonlinear submodule is experimental. 
Until this message is removed, breaking changes may be introduced in any minor or patch release of MathOptInterface.\n\n\n\n\n\n","category":"module"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.Model","page":"API Reference","title":"MathOptInterface.Nonlinear.Model","text":"Model()\n\nThe core datastructure for representing a nonlinear optimization problem.\n\nIt has the following fields:\n\nobjective::Union{Nothing,Expression} : holds the nonlinear objective function, if one exists, otherwise nothing.\nexpressions::Vector{Expression} : a vector of expressions in the model.\nconstraints::OrderedDict{ConstraintIndex,Constraint} : a map from ConstraintIndex to the corresponding Constraint. An OrderedDict is used instead of a Vector to support constraint deletion.\nparameters::Vector{Float64} : holds the current values of the parameters.\noperators::OperatorRegistry : stores the operators used in the model.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Nonlinear/reference/#nonlinear_api_expressions","page":"API Reference","title":"Expressions","text":"","category":"section"},{"location":"moi/submodules/Nonlinear/reference/","page":"API Reference","title":"API Reference","text":"Nonlinear.ExpressionIndex\nNonlinear.add_expression","category":"page"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.ExpressionIndex","page":"API Reference","title":"MathOptInterface.Nonlinear.ExpressionIndex","text":"ExpressionIndex\n\nAn index to a nonlinear expression that is returned by add_expression.\n\nGiven data::Model and ex::ExpressionIndex, use data[ex] to retrieve the corresponding Expression.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.add_expression","page":"API Reference","title":"MathOptInterface.Nonlinear.add_expression","text":"add_expression(model::Model, expr)::ExpressionIndex\n\nParse expr into a Expression and add to model. Returns an ExpressionIndex that can be interpolated into other input expressions.\n\nexpr must be a type that is supported by parse_expression.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> model = MOI.Nonlinear.Model();\n\njulia> x = MOI.VariableIndex(1);\n\njulia> ex = MOI.Nonlinear.add_expression(model, :($x^2 + 1))\nMathOptInterface.Nonlinear.ExpressionIndex(1)\n\njulia> MOI.Nonlinear.set_objective(model, :(sqrt($ex)))\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#nonlinear_api_parameters","page":"API Reference","title":"Parameters","text":"","category":"section"},{"location":"moi/submodules/Nonlinear/reference/","page":"API Reference","title":"API Reference","text":"Nonlinear.ParameterIndex\nNonlinear.add_parameter","category":"page"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.ParameterIndex","page":"API Reference","title":"MathOptInterface.Nonlinear.ParameterIndex","text":"ParameterIndex\n\nAn index to a nonlinear parameter that is returned by add_parameter. Given data::Model and p::ParameterIndex, use data[p] to retrieve the current value of the parameter and data[p] = value to set a new value.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.add_parameter","page":"API Reference","title":"MathOptInterface.Nonlinear.add_parameter","text":"add_parameter(model::Model, value::Float64)::ParameterIndex\n\nAdd a new parameter to model with the default value value. 
Returns a ParameterIndex that can be interpolated into other input expressions and used to modify the value of the parameter.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> model = MOI.Nonlinear.Model()\nA Nonlinear.Model with:\n 0 objectives\n 0 parameters\n 0 expressions\n 0 constraints\n\njulia> x = MOI.VariableIndex(1)\nMOI.VariableIndex(1)\n\njulia> p = MOI.Nonlinear.add_parameter(model, 1.2)\nMathOptInterface.Nonlinear.ParameterIndex(1)\n\njulia> c = MOI.Nonlinear.add_constraint(model, :($x^2 - $p), MOI.LessThan(0.0))\nMathOptInterface.Nonlinear.ConstraintIndex(1)\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#nonlinear_api_objectives","page":"API Reference","title":"Objectives","text":"","category":"section"},{"location":"moi/submodules/Nonlinear/reference/","page":"API Reference","title":"API Reference","text":"Nonlinear.set_objective","category":"page"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.set_objective","page":"API Reference","title":"MathOptInterface.Nonlinear.set_objective","text":"set_objective(model::Model, obj)::Nothing\n\nParse obj into a Expression and set as the objective function of model.\n\nobj must be a type that is supported by parse_expression.\n\nTo remove the objective, pass nothing.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> model = MOI.Nonlinear.Model()\nA Nonlinear.Model with:\n 0 objectives\n 0 parameters\n 0 expressions\n 0 constraints\n\njulia> x = MOI.VariableIndex(1)\nMOI.VariableIndex(1)\n\njulia> MOI.Nonlinear.set_objective(model, :($x^2 + 1))\n\njulia> MOI.Nonlinear.set_objective(model, x)\n\njulia> MOI.Nonlinear.set_objective(model, nothing)\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#nonlinear_api_constraints","page":"API Reference","title":"Constraints","text":"","category":"section"},{"location":"moi/submodules/Nonlinear/reference/","page":"API Reference","title":"API Reference","text":"Nonlinear.ConstraintIndex\nNonlinear.add_constraint\nNonlinear.delete","category":"page"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.ConstraintIndex","page":"API Reference","title":"MathOptInterface.Nonlinear.ConstraintIndex","text":"ConstraintIndex\n\nAn index to a nonlinear constraint that is returned by add_constraint.\n\nGiven data::Model and c::ConstraintIndex, use data[c] to retrieve the corresponding Constraint.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.add_constraint","page":"API Reference","title":"MathOptInterface.Nonlinear.add_constraint","text":"add_constraint(\n model::Model,\n func,\n set::Union{\n MOI.GreaterThan{Float64},\n MOI.LessThan{Float64},\n MOI.Interval{Float64},\n MOI.EqualTo{Float64},\n },\n)\n\nParse func and set into a Constraint and add to model. 
Returns a ConstraintIndex that can be used to delete the constraint or query solution information.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> model = MOI.Nonlinear.Model();\n\njulia> x = MOI.VariableIndex(1);\n\njulia> c = MOI.Nonlinear.add_constraint(model, :($x^2), MOI.LessThan(1.0))\nMathOptInterface.Nonlinear.ConstraintIndex(1)\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.delete","page":"API Reference","title":"MathOptInterface.Nonlinear.delete","text":"delete(model::Model, c::ConstraintIndex)::Nothing\n\nDelete the constraint index c from model.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> model = MOI.Nonlinear.Model()\nA Nonlinear.Model with:\n 0 objectives\n 0 parameters\n 0 expressions\n 0 constraints\n\njulia> x = MOI.VariableIndex(1)\nMOI.VariableIndex(1)\n\njulia> c = MOI.Nonlinear.add_constraint(model, :($x^2), MOI.LessThan(1.0))\nMathOptInterface.Nonlinear.ConstraintIndex(1)\n\njulia> model\nA Nonlinear.Model with:\n 0 objectives\n 0 parameters\n 0 expressions\n 1 constraint\n\njulia> MOI.Nonlinear.delete(model, c)\n\njulia> model\nA Nonlinear.Model with:\n 0 objectives\n 0 parameters\n 0 expressions\n 0 constraints\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#nonlinear_api_operators","page":"API Reference","title":"User-defined operators","text":"","category":"section"},{"location":"moi/submodules/Nonlinear/reference/","page":"API Reference","title":"API Reference","text":"Nonlinear.OperatorRegistry\nNonlinear.DEFAULT_UNIVARIATE_OPERATORS\nNonlinear.DEFAULT_MULTIVARIATE_OPERATORS\nNonlinear.register_operator\nNonlinear.register_operator_if_needed\nNonlinear.assert_registered\nNonlinear.check_return_type\nNonlinear.eval_univariate_function\nNonlinear.eval_univariate_gradient\nNonlinear.eval_univariate_hessian\nNonlinear.eval_multivariate_function\nNonlinear.eval_multivariate_gradient\nNonlinear.eval_multivariate_hessian\nNonlinear.eval_logic_function\nNonlinear.eval_comparison_function","category":"page"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.OperatorRegistry","page":"API Reference","title":"MathOptInterface.Nonlinear.OperatorRegistry","text":"OperatorRegistry()\n\nCreate a new OperatorRegistry to store and evaluate univariate and multivariate operators.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS","page":"API Reference","title":"MathOptInterface.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS","text":"DEFAULT_UNIVARIATE_OPERATORS\n\nThe list of univariate operators that are supported by default.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS\n73-element Vector{Symbol}:\n :+\n :-\n :abs\n :sign\n :sqrt\n :cbrt\n :abs2\n :inv\n :log\n :log10\n ⋮\n :airybi\n :airyaiprime\n :airybiprime\n :besselj0\n :besselj1\n :bessely0\n :bessely1\n :erfcx\n :dawson\n\n\n\n\n\n","category":"constant"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.DEFAULT_MULTIVARIATE_OPERATORS","page":"API Reference","title":"MathOptInterface.Nonlinear.DEFAULT_MULTIVARIATE_OPERATORS","text":"DEFAULT_MULTIVARIATE_OPERATORS\n\nThe list of multivariate operators that are supported by default.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> MOI.Nonlinear.DEFAULT_MULTIVARIATE_OPERATORS\n9-element Vector{Symbol}:\n :+\n :-\n :*\n :^\n :/\n :ifelse\n 
:atan\n :min\n :max\n\n\n\n\n\n","category":"constant"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.register_operator","page":"API Reference","title":"MathOptInterface.Nonlinear.register_operator","text":"register_operator(\n model::Model,\n op::Symbol,\n nargs::Int,\n f::Function,\n [∇f::Function],\n [∇²f::Function],\n)\n\nRegister the user-defined operator op with nargs input arguments in model.\n\nUnivariate functions\n\nf(x::T)::T must be a function that takes a single input argument x and returns the function evaluated at x. If ∇f and ∇²f are not provided, f must support any Real input type T.\n∇f(x::T)::T is a function that takes a single input argument x and returns the first derivative of f with respect to x. If ∇²f is not provided, ∇f must support any Real input type T.\n∇²f(x::T)::T is a function that takes a single input argument x and returns the second derivative of f with respect to x.\n\nMultivariate functions\n\nf(x::T...)::T must be a function that takes a nargs input arguments x and returns the function evaluated at x. If ∇f and ∇²f are not provided, f must support any Real input type T.\n∇f(g::AbstractVector{T}, x::T...)::T is a function that takes a cache vector g of length length(x), and fills each element g[i] with the partial derivative of f with respect to x[i].\n∇²f(H::AbstractMatrix, x::T...)::T is a function that takes a matrix H and fills the lower-triangular components H[i, j] with the Hessian of f with respect to x[i] and x[j] for i >= j.\n\nNotes for multivariate Hessians\n\nH has size(H) == (length(x), length(x)), but you must not access elements H[i, j] for i > j.\nH is dense, but you do not need to fill structural zeros.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.register_operator_if_needed","page":"API Reference","title":"MathOptInterface.Nonlinear.register_operator_if_needed","text":"register_operator_if_needed(\n registry::OperatorRegistry,\n op::Symbol,\n nargs::Int,\n f::Function;\n)\n\nSimilar to register_operator, but this function warns if the function is not registered, and skips silently if it already is.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.assert_registered","page":"API Reference","title":"MathOptInterface.Nonlinear.assert_registered","text":"assert_registered(registry::OperatorRegistry, op::Symbol, nargs::Int)\n\nThrow an error if op is not registered in registry with nargs arguments.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.check_return_type","page":"API Reference","title":"MathOptInterface.Nonlinear.check_return_type","text":"check_return_type(::Type{T}, ret::S) where {T,S}\n\nOverload this method for new types S to throw an informative error if a user-defined function returns the type S instead of T.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.eval_univariate_function","page":"API Reference","title":"MathOptInterface.Nonlinear.eval_univariate_function","text":"eval_univariate_function(\n registry::OperatorRegistry,\n op::Symbol,\n x::T,\n) where {T}\n\nEvaluate the operator op(x)::T, where op is a univariate function in registry.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.eval_univariate_gradient","page":"API 
Reference","title":"MathOptInterface.Nonlinear.eval_univariate_gradient","text":"eval_univariate_gradient(\n registry::OperatorRegistry,\n op::Symbol,\n x::T,\n) where {T}\n\nEvaluate the first-derivative of the operator op(x)::T, where op is a univariate function in registry.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.eval_univariate_hessian","page":"API Reference","title":"MathOptInterface.Nonlinear.eval_univariate_hessian","text":"eval_univariate_hessian(\n registry::OperatorRegistry,\n op::Symbol,\n x::T,\n) where {T}\n\nEvaluate the second-derivative of the operator op(x)::T, where op is a univariate function in registry.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.eval_multivariate_function","page":"API Reference","title":"MathOptInterface.Nonlinear.eval_multivariate_function","text":"eval_multivariate_function(\n registry::OperatorRegistry,\n op::Symbol,\n x::AbstractVector{T},\n) where {T}\n\nEvaluate the operator op(x)::T, where op is a multivariate function in registry.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.eval_multivariate_gradient","page":"API Reference","title":"MathOptInterface.Nonlinear.eval_multivariate_gradient","text":"eval_multivariate_gradient(\n registry::OperatorRegistry,\n op::Symbol,\n g::AbstractVector{T},\n x::AbstractVector{T},\n) where {T}\n\nEvaluate the gradient of operator g .= ∇op(x), where op is a multivariate function in registry.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.eval_multivariate_hessian","page":"API Reference","title":"MathOptInterface.Nonlinear.eval_multivariate_hessian","text":"eval_multivariate_hessian(\n registry::OperatorRegistry,\n op::Symbol,\n H::AbstractMatrix,\n x::AbstractVector{T},\n) where {T}\n\nEvaluate the Hessian of operator ∇²op(x), where op is a multivariate function in registry.\n\nThe Hessian is stored in the lower-triangular part of the matrix H.\n\nnote: Note\nImplementations of the Hessian operators will not fill structural zeros. 
Therefore, before calling this function you should pre-populate the matrix H with 0.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.eval_logic_function","page":"API Reference","title":"MathOptInterface.Nonlinear.eval_logic_function","text":"eval_logic_function(\n registry::OperatorRegistry,\n op::Symbol,\n lhs::T,\n rhs::T,\n)::Bool where {T}\n\nEvaluate (lhs op rhs)::Bool, where op is a logic operator in registry.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.eval_comparison_function","page":"API Reference","title":"MathOptInterface.Nonlinear.eval_comparison_function","text":"eval_comparison_function(\n registry::OperatorRegistry,\n op::Symbol,\n lhs::T,\n rhs::T,\n)::Bool where {T}\n\nEvaluate (lhs op rhs)::Bool, where op is a comparison operator in registry.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#Automatic-differentiation-backends","page":"API Reference","title":"Automatic-differentiation backends","text":"","category":"section"},{"location":"moi/submodules/Nonlinear/reference/","page":"API Reference","title":"API Reference","text":"Nonlinear.Evaluator\nNonlinear.AbstractAutomaticDifferentiation\nNonlinear.ExprGraphOnly\nNonlinear.SparseReverseMode","category":"page"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.Evaluator","page":"API Reference","title":"MathOptInterface.Nonlinear.Evaluator","text":"Evaluator(\n model::Model,\n backend::AbstractAutomaticDifferentiation,\n ordered_variables::Vector{MOI.VariableIndex},\n)\n\nCreate Evaluator, a subtype of MOI.AbstractNLPEvaluator, from Model.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.AbstractAutomaticDifferentiation","page":"API Reference","title":"MathOptInterface.Nonlinear.AbstractAutomaticDifferentiation","text":"AbstractAutomaticDifferentiation\n\nAn abstract type for extending Evaluator.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.ExprGraphOnly","page":"API Reference","title":"MathOptInterface.Nonlinear.ExprGraphOnly","text":"ExprGraphOnly() <: AbstractAutomaticDifferentiation\n\nThe default implementation of AbstractAutomaticDifferentiation. The only supported feature is :ExprGraph.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.SparseReverseMode","page":"API Reference","title":"MathOptInterface.Nonlinear.SparseReverseMode","text":"SparseReverseMode() <: AbstractAutomaticDifferentiation\n\nAn implementation of AbstractAutomaticDifferentiation that uses sparse reverse-mode automatic differentiation to compute derivatives. 
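As an illustrative sketch only (based on the Evaluator constructor signature documented above; the objective expression and the evaluation point are made up for the example): import MathOptInterface as MOI\nmodel = MOI.Nonlinear.Model()\nx = MOI.VariableIndex(1)\nMOI.Nonlinear.set_objective(model, :($x^2 + sin($x)))\n# build an evaluator backed by sparse reverse-mode automatic differentiation\nevaluator = MOI.Nonlinear.Evaluator(model, MOI.Nonlinear.SparseReverseMode(), [x])\nMOI.initialize(evaluator, [:Grad])\nMOI.eval_objective(evaluator, [1.5])              # objective value at x = 1.5\ng = zeros(1)\nMOI.eval_objective_gradient(evaluator, g, [1.5])  # fills g with the gradient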
Supports all features in the MOI nonlinear interface.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Nonlinear/reference/#Data-structure","page":"API Reference","title":"Data-structure","text":"","category":"section"},{"location":"moi/submodules/Nonlinear/reference/","page":"API Reference","title":"API Reference","text":"Nonlinear.Node\nNonlinear.NodeType\nNonlinear.Expression\nNonlinear.Constraint\nNonlinear.adjacency_matrix\nNonlinear.parse_expression\nNonlinear.convert_to_expr\nNonlinear.ordinal_index","category":"page"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.Node","page":"API Reference","title":"MathOptInterface.Nonlinear.Node","text":"struct Node\n type::NodeType\n index::Int\n parent::Int\nend\n\nA single node in a nonlinear expression tree. Used by Expression.\n\nSee the MathOptInterface documentation for information on how the nodes and values form an expression tree.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.NodeType","page":"API Reference","title":"MathOptInterface.Nonlinear.NodeType","text":"NodeType\n\nAn enum describing the possible node types. Each Node has a .index field, which should be interpreted as follows:\n\nNODE_CALL_MULTIVARIATE: the index into operators.multivariate_operators\nNODE_CALL_UNIVARIATE: the index into operators.univariate_operators\nNODE_LOGIC: the index into operators.logic_operators\nNODE_COMPARISON: the index into operators.comparison_operators\nNODE_MOI_VARIABLE: the value of MOI.VariableIndex(index) in the user's space of the model.\nNODE_VARIABLE: the 1-based index of the internal vector\nNODE_VALUE: the index into the .values field of Expression\nNODE_PARAMETER: the index into data.parameters\nNODE_SUBEXPRESSION: the index into data.expressions\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.Expression","page":"API Reference","title":"MathOptInterface.Nonlinear.Expression","text":"struct Expression\n nodes::Vector{Node}\n values::Vector{Float64}\nend\n\nThe core type that represents a nonlinear expression. See the MathOptInterface documentation for information on how the nodes and values form an expression tree.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.Constraint","page":"API Reference","title":"MathOptInterface.Nonlinear.Constraint","text":"struct Constraint\n expression::Expression\n set::Union{\n MOI.LessThan{Float64},\n MOI.GreaterThan{Float64},\n MOI.EqualTo{Float64},\n MOI.Interval{Float64},\n }\nend\n\nA type to hold information relating to the nonlinear constraint f(x) in S, where f(x) is defined by .expression, and S is .set.\n\n\n\n\n\n","category":"type"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.adjacency_matrix","page":"API Reference","title":"MathOptInterface.Nonlinear.adjacency_matrix","text":"adjacency_matrix(nodes::Vector{Node})\n\nCompute the sparse adjacency matrix describing the parent-child relationships in nodes.\n\nThe element (i, j) is true if there is an edge from node[j] to node[i]. 
Since we get a column-oriented matrix, this gives us a fast way to look up the edges leaving any node (that is, the children).\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.parse_expression","page":"API Reference","title":"MathOptInterface.Nonlinear.parse_expression","text":"parse_expression(data::Model, input)::Expression\n\nParse input into a Expression.\n\n\n\n\n\nparse_expression(\n data::Model,\n expr::Expression,\n input::Any,\n parent_index::Int,\n)::Expression\n\nParse input into a Expression, and add it to expr as a child of expr.nodes[parent_index]. Existing subexpressions and parameters are stored in data.\n\nYou can extend parsing support to new types of objects by overloading this method with a different type on input::Any.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.convert_to_expr","page":"API Reference","title":"MathOptInterface.Nonlinear.convert_to_expr","text":"convert_to_expr(data::Model, expr::Expression)\n\nConvert the Expression expr into a Julia Expr.\n\nsubexpressions are represented by a ExpressionIndex object.\nparameters are represented by a ParameterIndex object.\nvariables are represented by an MOI.VariableIndex object.\n\n\n\n\n\nconvert_to_expr(\n evaluator::Evaluator,\n expr::Expression;\n moi_output_format::Bool,\n)\n\nConvert the Expression expr into a Julia Expr.\n\nIf moi_output_format = true:\n\nsubexpressions will be converted to Julia Expr and substituted into the output expression.\nthe current value of each parameter will be interpolated into the expression\nvariables will be represented in the form x[MOI.VariableIndex(i)]\n\nIf moi_output_format = false:\n\nsubexpressions will be represented by a ExpressionIndex object.\nparameters will be represented by a ParameterIndex object.\nvariables will be represented by an MOI.VariableIndex object.\n\nwarning: Warning\nTo use moi_output_format = true, you must have first called MOI.initialize with :ExprGraph as a requested feature.\n\n\n\n\n\n","category":"function"},{"location":"moi/submodules/Nonlinear/reference/#MathOptInterface.Nonlinear.ordinal_index","page":"API Reference","title":"MathOptInterface.Nonlinear.ordinal_index","text":"ordinal_index(evaluator::Evaluator, c::ConstraintIndex)::Int\n\nReturn the 1-indexed value of the constraint index c in evaluator.\n\nExample\n\njulia> import MathOptInterface as MOI\n\njulia> model = MOI.Nonlinear.Model()\nA Nonlinear.Model with:\n 0 objectives\n 0 parameters\n 0 expressions\n 0 constraints\n\njulia> x = MOI.VariableIndex(1)\nMOI.VariableIndex(1)\n\njulia> c1 = MOI.Nonlinear.add_constraint(model, :($x^2), MOI.LessThan(1.0))\nMathOptInterface.Nonlinear.ConstraintIndex(1)\n\njulia> c2 = MOI.Nonlinear.add_constraint(model, :($x^2), MOI.LessThan(1.0))\nMathOptInterface.Nonlinear.ConstraintIndex(2)\n\njulia> evaluator = MOI.Nonlinear.Evaluator(model)\nNonlinear.Evaluator with available features:\n * :ExprGraph\n\njulia> MOI.initialize(evaluator, Symbol[])\n\njulia> MOI.Nonlinear.ordinal_index(evaluator, c2) # Returns 2\n2\n\njulia> MOI.Nonlinear.delete(model, c1)\n\njulia> evaluator = MOI.Nonlinear.Evaluator(model)\nNonlinear.Evaluator with available features:\n * :ExprGraph\n\njulia> MOI.initialize(evaluator, Symbol[])\n\njulia> MOI.Nonlinear.ordinal_index(evaluator, c2) # Returns 1\n1\n\n\n\n\n\n","category":"function"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"EditURL 
= \"https://github.com/lanl-ansi/Juniper.jl/blob/v0.9.2/README.md\"","category":"page"},{"location":"packages/Juniper/#Juniper","page":"lanl-ansi/Juniper.jl","title":"Juniper","text":"","category":"section"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"(Image: CI) (Image: codecov) (Image: Documentation)","category":"page"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"Juniper (Jump Nonlinear Integer Program solver) is a solver for mixed-integer nonlinear programs. ","category":"page"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"It is a heuristic which is not guaranteed to find the global optimum. If you need the global optimum, check out Alpine.","category":"page"},{"location":"packages/Juniper/#Installation","page":"lanl-ansi/Juniper.jl","title":"Installation","text":"","category":"section"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"Install Juniper using the Julia package manager:","category":"page"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"import Pkg\nPkg.add(\"JuMP\")","category":"page"},{"location":"packages/Juniper/#Use-with-JuMP","page":"lanl-ansi/Juniper.jl","title":"Use with JuMP","text":"","category":"section"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"Use Juniper with JuMP as follows:","category":"page"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"using JuMP, Juniper, Ipopt\nipopt = optimizer_with_attributes(Ipopt.Optimizer, \"print_level\"=>0)\noptimizer = optimizer_with_attributes(Juniper.Optimizer, \"nl_solver\"=>ipopt)\nmodel = Model(optimizer)\nv = [10, 20, 12, 23, 42]\nw = [12, 45, 12, 22, 21]\n@variable(model, x[1:5], Bin)\n@objective(model, Max, v' * x)\n@constraint(model, sum(w[i]*x[i]^2 for i in 1:5) <= 45)\noptimize!(model)\nprintln(termination_status(model))\nprintln(objective_value(model))\nprintln(value.(x))","category":"page"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"The nl_solver is used by Juniper to solve continuous nonlinear sub-problems while Juniper searches for acceptable assignments to the discrete variables. 
A common choice is Ipopt, but any optimizer that supports the continuous relaxation of the model may be used.","category":"page"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"To solve problems with more complex nonlinear functions, use the @NLconstraint and @NLobjective JuMP macros.","category":"page"},{"location":"packages/Juniper/#Documentation","page":"lanl-ansi/Juniper.jl","title":"Documentation","text":"","category":"section"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"The online documentation is available at https://lanl-ansi.github.io/Juniper.jl/stable/.","category":"page"},{"location":"packages/Juniper/#Feasibility-pump","page":"lanl-ansi/Juniper.jl","title":"Feasibility pump","text":"","category":"section"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"If Juniper has difficulty finding feasible solutions on your model, try adding a solver that supports integer variables (for example, HiGHS) to run a feasibility pump:","category":"page"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"using JuMP, Juniper, Ipopt, HiGHS\nipopt = optimizer_with_attributes(Ipopt.Optimizer, \"print_level\" => 0)\nhighs = optimizer_with_attributes(HiGHS.Optimizer, \"output_flag\" => false)\nmodel = Model(\n optimizer_with_attributes(\n Juniper.Optimizer,\n \"nl_solver\" => ipopt,\n \"mip_solver\" => highs,\n ),\n)","category":"page"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"The feasibility pump is used at the start of Juniper to find a feasible solution before the branch and bound part starts. For some classes of problems this can be a highly effective pre-processor.","category":"page"},{"location":"packages/Juniper/#Citing-Juniper","page":"lanl-ansi/Juniper.jl","title":"Citing Juniper","text":"","category":"section"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"If you find Juniper useful in your work, we kindly request that you cite the following paper or technical report:","category":"page"},{"location":"packages/Juniper/","page":"lanl-ansi/Juniper.jl","title":"lanl-ansi/Juniper.jl","text":"@inproceedings{juniper,\n Author = {Ole Kröger and Carleton Coffrin and Hassan Hijazi and Harsha Nagarajan},\n Title = {Juniper: An Open-Source Nonlinear Branch-and-Bound Solver in Julia},\n booktitle=\"Integration of Constraint Programming, Artificial Intelligence, and Operations Research\",\n pages=\"377--386\",\n year=\"2018\",\n publisher=\"Springer International Publishing\",\n isbn=\"978-3-319-93031-2\"\n}","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"EditURL = \"diet.jl\"","category":"page"},{"location":"tutorials/linear/diet/#The-diet-problem","page":"The diet problem","title":"The diet problem","text":"","category":"section"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"The purpose of this tutorial is to demonstrate how to incorporate DataFrames into a JuMP model. 
As an example, we use the classic Stigler diet problem.","category":"page"},{"location":"tutorials/linear/diet/#Required-packages","page":"The diet problem","title":"Required packages","text":"","category":"section"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"This tutorial requires the following packages:","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"using JuMP\nimport CSV\nimport DataFrames\nimport HiGHS\nimport Test","category":"page"},{"location":"tutorials/linear/diet/#Formulation","page":"The diet problem","title":"Formulation","text":"","category":"section"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"We wish to cook a nutritionally balanced meal by choosing the quantity of each food f to eat from a set of foods F in our kitchen.","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"Each food f has a cost, c_f, as well as a macro-nutrient profile a_mf for each macro-nutrient m in M.","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"Because we care about a nutritionally balanced meal, we set some minimum and maximum limits for each nutrient, which we denote l_m and u_m respectively.","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"Furthermore, because we are optimizers, we seek the minimum cost solution.","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"With a little effort, we can formulate our dinner problem as the following linear program:","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"\\begin{aligned}\n\\min & \\sum\\limits_{f \\in F} c_f x_f \\\\\n\\text{s.t.}\\ & l_m \\le \\sum\\limits_{f \\in F} a_{m,f} x_f \\le u_m && \\forall m \\in M \\\\\n& x_f \\ge 0 && \\forall f \\in F\n\\end{aligned}","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"In the rest of this tutorial, we will create and solve this problem in JuMP, and learn what we should cook for dinner.","category":"page"},{"location":"tutorials/linear/diet/#Data","page":"The diet problem","title":"Data","text":"","category":"section"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"First, we need some data for the problem. For this tutorial, we'll write CSV files to a temporary directory from Julia. 
If you have existing files, you could change the filenames to point to them instead.","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"dir = mktempdir()","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"The first file is a list of foods with their macro-nutrient profile:","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"food_csv_filename = joinpath(dir, \"diet_foods.csv\")\nopen(food_csv_filename, \"w\") do io\n write(\n io,\n \"\"\"\n name,cost,calories,protein,fat,sodium\n hamburger,2.49,410,24,26,730\n chicken,2.89,420,32,10,1190\n hot dog,1.50,560,20,32,1800\n fries,1.89,380,4,19,270\n macaroni,2.09,320,12,10,930\n pizza,1.99,320,15,12,820\n salad,2.49,320,31,12,1230\n milk,0.89,100,8,2.5,125\n ice cream,1.59,330,8,10,180\n \"\"\",\n )\n return\nend\nfoods = CSV.read(food_csv_filename, DataFrames.DataFrame)","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"Here, F is foods.name and c_f is foods.cost. (We're also playing a bit loose with the term \"macro-nutrient\" by including calories and sodium.)","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"We also need our minimum and maximum limits:","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"nutrient_csv_filename = joinpath(dir, \"diet_nutrient.csv\")\nopen(nutrient_csv_filename, \"w\") do io\n write(\n io,\n \"\"\"\n nutrient,min,max\n calories,1800,2200\n protein,91,\n fat,0,65\n sodium,0,1779\n \"\"\",\n )\n return\nend\nlimits = CSV.read(nutrient_csv_filename, DataFrames.DataFrame)","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"Protein is missing data for the maximum. Let's fix that using coalesce:","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"limits.max = coalesce.(limits.max, Inf)\nlimits","category":"page"},{"location":"tutorials/linear/diet/#JuMP-formulation","page":"The diet problem","title":"JuMP formulation","text":"","category":"section"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"Now we're ready to convert our mathematical formulation into a JuMP model.","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"First, create a new JuMP model. Since we have a linear program, we'll use HiGHS as our optimizer:","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"model = Model(HiGHS.Optimizer)\nset_silent(model)","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"Next, we create a set of decision variables x, with one element for each row in the DataFrame, and each x has a lower bound of 0:","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"@variable(model, x[foods.name] >= 0)","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"To simplify things later on, we store the vector as a new column x in the DataFrame foods. 
Since x is a DenseAxisArray, we first need to convert it to an Array:","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"foods.x = Array(x)","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"Our objective is to minimize the total cost of purchasing food:","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"@objective(model, Min, sum(foods.cost .* foods.x));\nnothing #hide","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"For the next component, we need to add a constraint that our total intake of each component is within the limits contained in the limits DataFrame:","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"@constraint(\n model,\n [row in eachrow(limits)],\n row.min <= sum(foods[!, row.nutrient] .* foods.x) <= row.max,\n);\nnothing #hide","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"What does our model look like?","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"print(model)","category":"page"},{"location":"tutorials/linear/diet/#Solution","page":"The diet problem","title":"Solution","text":"","category":"section"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"Let's optimize and take a look at the solution:","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"optimize!(model)\n@assert is_solved_and_feasible(model)\nTest.@test objective_value(model) ≈ 11.8288 atol = 1e-4 #hide\nsolution_summary(model)","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"We found an optimal solution. 
Let's see what the optimal solution is:","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"for row in eachrow(foods)\n println(row.name, \" = \", value(row.x))\nend","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"That's a lot of milk and ice cream, and sadly, we only get 0.6 of a hamburger.","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"We can also use the function Containers.rowtable to easily convert the result into a DataFrame:","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"table = Containers.rowtable(value, x; header = [:food, :quantity])\nsolution = DataFrames.DataFrame(table)","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"This makes it easy to perform analyses of our solution:","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"filter!(row -> row.quantity > 0.0, solution)","category":"page"},{"location":"tutorials/linear/diet/#Problem-modification","page":"The diet problem","title":"Problem modification","text":"","category":"section"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"JuMP makes it easy to take an existing model and modify it by adding extra constraints. Let's see what happens if we add a constraint that we can buy at most 6 units of milk or ice cream combined.","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"dairy_foods = [\"milk\", \"ice cream\"]\nis_dairy = map(name -> name in dairy_foods, foods.name)\ndairy_constraint = @constraint(model, sum(foods[is_dairy, :x]) <= 6)\noptimize!(model)\nTest.@test !is_solved_and_feasible(model)\nTest.@test termination_status(model) == INFEASIBLE\nTest.@test primal_status(model) == NO_SOLUTION\nsolution_summary(model)","category":"page"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"There exists no feasible solution to our problem. Looks like we're stuck eating ice cream for dinner.","category":"page"},{"location":"tutorials/linear/diet/#Next-steps","page":"The diet problem","title":"Next steps","text":"","category":"section"},{"location":"tutorials/linear/diet/","page":"The diet problem","title":"The diet problem","text":"You can delete a constraint using delete(model, dairy_constraint). Can you add a different constraint to provide a diet with less dairy?\nSome food items (like hamburgers) are discrete. You can use set_integer to force a variable to take integer values. What happens to the solution if you do?","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"EditURL = \"n-queens.jl\"","category":"page"},{"location":"tutorials/linear/n-queens/#N-Queens","page":"N-Queens","title":"N-Queens","text":"","category":"section"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"This tutorial was generated using Literate.jl. 
Download the source as a .jl file.","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"This tutorial was originally contributed by Matthew Helm and Mathieu Tanneau.","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"The N-Queens problem involves placing N queens on an N x N chessboard such that none of the queens attacks another. In chess, a queen can move vertically, horizontally, and diagonally so there cannot be more than one queen on any given row, column, or diagonal.","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"(Image: Four Queens)","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"Note that none of the queens above are able to attack any other as a result of their careful placement.","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"using JuMP\nimport HiGHS\nimport LinearAlgebra","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"N-Queens","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"N = 8\n\nmodel = Model(HiGHS.Optimizer)\nset_silent(model)","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"Next, let's create an N x N chessboard of binary values. 0 will represent an empty space on the board and 1 will represent a space occupied by one of our queens:","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"@variable(model, x[1:N, 1:N], Bin);\nnothing #hide","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"Now we can add our constraints:","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"There must be exactly one queen in a given row/column","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"for i in 1:N\n @constraint(model, sum(x[i, :]) == 1)\n @constraint(model, sum(x[:, i]) == 1)\nend","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"There can only be one queen on any given diagonal","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"for i in -(N - 1):(N-1)\n @constraint(model, sum(LinearAlgebra.diag(x, i)) <= 1)\n @constraint(model, sum(LinearAlgebra.diag(reverse(x; dims = 1), i)) <= 1)\nend","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"We are ready to put our model to work and see if it is able to find a feasible solution:","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"optimize!(model)\n@assert is_solved_and_feasible(model)","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"We can now review the solution that our model found:","category":"page"},{"location":"tutorials/linear/n-queens/","page":"N-Queens","title":"N-Queens","text":"solution = round.(Int, value.(x))","category":"page"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"EditURL = 
\"https://github.com/MAiNGO-github/MAiNGO.jl/blob/v0.2.2/README.md\"","category":"page"},{"location":"packages/MAiNGO/#MAiNGO.jl","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO.jl","text":"","category":"section"},{"location":"packages/MAiNGO/#What-is-MAiNGO?","page":"MAiNGO-github/MAiNGO.jl","title":"What is MAiNGO?","text":"","category":"section"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"MAiNGO (McCormick-based Algorithm for mixed-integer Nonlinear Global Optimization) is a deterministic global optimization solver for nonconvex mixed-integer nonlinear programs (MINLPs). For more information on MAiNGO, including installation, usage, and licensing, please see the repository and the documentation.","category":"page"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"MAiNGO.jl is a wrapper for using MAiNGO in Julia. It requires a working installation of MAiNGO, either the standalone version with parser support (Mode A), or the shared parser library version (Mode B). When building MAiNGO from source this is configurable in the CMake configuration of MAiNGO. Per default, precompiled version of MAiNGO is used that operates in Mode B. ","category":"page"},{"location":"packages/MAiNGO/#Using-the-precompiled-version-of-MAiNGO-from-the-Julia-Package-Manager","page":"MAiNGO-github/MAiNGO.jl","title":"Using the precompiled version of MAiNGO from the Julia Package Manager","text":"","category":"section"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"A Julia package containing a precompiled version of MAiNGO is available (MAiNGO_jll). This version is used by default on supported platforms (Linux/MacOs/Windows), but this can be changed (see here). The precompiled version contains only open-source components. If you would like to use commercial subsolvers with MAiNGO (for example CPLEX or KNITRO), it might still make sense to compile MAiNGO yourself and use this version rather than the precompiled one.","category":"page"},{"location":"packages/MAiNGO/#Quick-start","page":"MAiNGO-github/MAiNGO.jl","title":"Quick start","text":"","category":"section"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"using MAiNGO # if this fails, you need to add the package first manually\nusing JuMP\n#Set options in constructor\nmodel=Model(optimizer_with_attributes(MAiNGO.Optimizer, \"epsilonA\"=> 1e-8))\nset_silent(model)\n\n@variable(model, x, lower_bound=-20, upper_bound=20)\n@variable(model, 0<=y<=2)\n@variable(model, 0<=z<=2)\n@variable(model, 0<=d<=2)\n@variable(model, 0<=l<=6)\n@variable(model, 0<=b<=6)\n\n@NLobjective(model, Min, y*-1*x^2*(exp(-x^2+z+d+b)+l*b))\n@NLconstraint(model,(x^2+y^2>=1))\nJuMP.optimize!(model)\n#query results\nprintln(value(x),\" \",value(y))\nprintln(termination_status(model))\nprintln(primal_status(model))","category":"page"},{"location":"packages/MAiNGO/#Using-a-custom-MAiNGO-version","page":"MAiNGO-github/MAiNGO.jl","title":"Using a custom MAiNGO version","text":"","category":"section"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"If you want to make use of a MAiNGO version that you build from source yourself, you have to give the path to the correct binary file. 
The correct path depends on the mode of operation.","category":"page"},{"location":"packages/MAiNGO/#Modes-of-operation","page":"MAiNGO-github/MAiNGO.jl","title":"Modes of operation","text":"","category":"section"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"The following library makes it possible to call MAiNGO from Julia. Currently two modes are supported:","category":"page"},{"location":"packages/MAiNGO/#Mode-A)","page":"MAiNGO-github/MAiNGO.jl","title":"Mode A)","text":"","category":"section"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"Using the MAiNGO standalone executable with compiled parser support. This only allows constructing the problem in JuMP and calling the MAiNGO executable with the file path. Thus, results are obtained in the form of an output text file.","category":"page"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"If a JSON file is also written (by setting the corresponding MAiNGO option), then the contents of that file are parsed, allowing the model to be queried from JuMP. This requires the JSON module to be installed in Julia.","category":"page"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"#Set path to MAiNGO standalone exe with compiled parser support.\nENV[\"MAINGO_EXEC\"] = \"W:\\\\maingo_build\\\\Debug\\\\MAiNGO.exe\" #replace \"W:\\\\maingo_build\\\\Debug\\\\\" with path to MAiNGO.exe\nusing MAiNGO # if this fails, you need to add the package first manually\n#create model\nusing JuMP\nmodel=Model(MAiNGO.Optimizer)\n@variable(model, x, lower_bound=0, upper_bound=5)\n@variable(model, y, lower_bound=0, upper_bound=2, start=0.5)\n\n#The following also works:\n#@variable(model,x in MOI.Interval(0,5)) \n#@variable(model, 0<=x<=5)\n#For integer variables use \n#@variable(model, y in MOI.Integer(), start=0.5)\n\n@constraint(model,y+x<=5)\n@constraint(model,x+y>=4)\n#Linear objective is also possible\n#@objective(model, Max, (1 - x)*y)\n@NLobjective(model, Max, (1 - x)^2 + 100 * (y - x^2)^2)\n@NLconstraint(model,min(x^2+y^2,y)<=5+y^2)\nMOI.set(model, MOI.RawOptimizerAttribute(\"writeJson\"),1) # write JSON file to enable querying of results from JuMP\nJuMP.optimize!(model)\nprintln(objective_value(model))","category":"page"},{"location":"packages/MAiNGO/#Mode-B)","page":"MAiNGO-github/MAiNGO.jl","title":"Mode B)","text":"","category":"section"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"Compiling an interface presenting a C-API to Julia. This must be configured when building MAiNGO, but allows several improvements. The problem definition is passed in memory. 
Settings can be set from within Julia/JuMP, and the results are returned as Julia variables and can be queried from JuMP.","category":"page"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"For example:","category":"page"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"#Set path to shared library with C-API.\nENV[\"MAINGO_LIB\"]=\"W:\\\\maingo_build\\\\Debug\\\\shared_parser.dll\" #replace \"W:\\\\maingo_build\\\\Debug\\\\\" with path to shared_parser.dll\n#include the wrapper\nusing MAiNGO # if this fails, you need to add the package first manually\nusing JuMP\n\n#Set options in constructor\nmodel=Model(optimizer_with_attributes(MAiNGO.Optimizer, \"epsilonA\"=> 1e-8,\"res_name\"=>\"res_new.txt\",\"prob_name\"=>\"problem.txt\"))\n#Alternate syntax\n#model=Model(() -> MAiNGO.Optimizer(epsilonA=1e-8))#, \"options\" => options))\n\n@variable(model, x, lower_bound=-20, upper_bound=20)\n#@variable(model, y in MOI.Integer(),lower_bound=-10,upper_bound=10, start=0.5)\n#Alternative forms\n#@variable(model,x in MOI.Interval(0,5))\n#@variable(model,y in MOI.Interval(0,2))\n#@variable(model, 0<=x<=5)\n@variable(model, 0<=y<=2)\n@variable(model, 0<=z<=2)\n@variable(model, 0<=d<=2)\n@variable(model, 0<=l<=6)\n@variable(model, 0<=b<=6)\n#@constraint(model,y+x<=5)\n#@constraint(model,x+y>=4)\n\n@NLobjective(model, Min, y*-1*x^2*(exp(-x^2+z+d+b)+l*b))\n@NLconstraint(model,(x^2+y^2>=1))\nJuMP.optimize!(model)\n#C-API allows us to query results\nprintln(value(x),\" \",value(y))\nprintln(termination_status(model))\nprintln(primal_status(model))","category":"page"},{"location":"packages/MAiNGO/#Supported-MAiNGO-Options","page":"MAiNGO-github/MAiNGO.jl","title":"Supported MAiNGO Options","text":"","category":"section"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"Both modes of operation allow setting MAiNGO options through the MathOptInterface API. An example of how to do so is given below. All numerical and boolean options that are available in MAiNGO can be set using the MOI.RawOptimizerAttribute() function. 
Additionally, the following options can also be set through specific other MOI functions:","category":"page"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"Solver time limit (in seconds): MOI.TimeLimitSec()\nAbsolute gap: MOI.AbsoluteGapTolerance()\nRelative gap: MOI.RelativeGapTolerance()\nSilencing output: MOI.Silent() (this overwrites any other verbosity settings)","category":"page"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"# assuming necessary paths and using-statements have already been set\nmodel = Model(MAiNGO.Optimizer)\nMOI.set(model, MOI.Silent(), true) # silence all MAiNGO output\nMOI.set(model, MOI.AbsoluteGapTolerance(), 1e-8) # set the absolute gap tolerance\nMOI.set(model, MOI.RawOptimizerAttribute(\"PRE_pureMultistart\"), 1) # example of setting an option via the MOI.RawOptimizerAttribute() function","category":"page"},{"location":"packages/MAiNGO/#Switching-between-modes,-finding-the-MAiNGO-executable","page":"MAiNGO-github/MAiNGO.jl","title":"Switching between modes, finding the MAiNGO executable","text":"","category":"section"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"If you need to update the path to the MAiNGO executable during a session, this can be done as follows:","category":"page"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"\nusing MAiNGO\n# by default, MAiNGO_jll will be used\n# explicitly force use of standalone version (mode A)\nENV[\"MAINGO_EXEC\"] = \"W:\\\\maingo_build\\\\Debug\\\\MAiNGO.exe\"\nfindMAiNGO(preferred=MAiNGO.C_API) # see note on \"preferred\"-argument below\n# ...\n# for example switch to release version of MAiNGO\nENV[\"MAINGO_EXEC\"] = \"W:\\\\maingo_build\\\\Release\\\\MAiNGO.exe\"\nfindMAiNGO(preferred=MAiNGO.C_API)\n# now switch to C-API (mode B)\nENV[\"MAINGO_LIB\"]=\"W:\\\\maingo_build\\\\Debug\\\\shared_parser.dll\" #replace \"W:\\\\maingo_build\\\\Debug\\\\\" with path to shared_parser.dll\nfindMAiNGO(preferred=MAiNGO.C_API)\n# switch back to MAiNGO_jll\nfindMAiNGO(preferred=MAiNGO.MAINGO_JLL)","category":"page"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"The findMAiNGO() function takes several optional arguments, which can be passed as keyword arguments:","category":"page"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"verbose: boolean, whether or not progress on finding MAiNGO is reported. (Default value: false)\npreferred: either MAiNGO.MAINGO_JLL or MAiNGO.C_API, determines whether the jll binaries or a custom installation of MAiNGO is preferred. Note that the C-API is always preferred to the standalone version. If a custom standalone version should be used, set this value to C_API and pass an empty string as the c_api argument (see next). (Default value: MAINGO_JLL)\nc_api: string, path to the C-API file. If set, this overrides the environment variable MAINGO_LIB.\nstandalone: string, path to standalone executable file. 
If set, this overrides the environment variable MAINGO_EXEC.","category":"page"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"For example, to use the C-API at a new location, one could call:","category":"page"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"using MAiNGO\nfindMAiNGO(preferred=MAiNGO.C_API, c_api=\"path\\\\to\\\\c\\\\api\\\\shared_parser.dll\")","category":"page"},{"location":"packages/MAiNGO/#Currently-working:","page":"MAiNGO-github/MAiNGO.jl","title":"Currently working:","text":"","category":"section"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"Integer and binary variables.\nAffine, Quadratic and nonlinear constraints and objectives.\nOperations: min,max,*,/,+,-,-(unary), exp,log,abs,sqrt,^\nOther operations are easy to add if supported by MathOptInterface,ALE and MAiNGO.\nWriting problem defined in JuMP syntax to an ALE problem.txt and calling MAiNGO.exe on a specified path.\nAlternatively using a C-API to call MAiNGO.","category":"page"},{"location":"packages/MAiNGO/#Restrictions-compared-to-using-the-Python-or-C-interface","page":"MAiNGO-github/MAiNGO.jl","title":"Restrictions compared to using the Python or C++ interface","text":"","category":"section"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"It is assumed that all variables are bounded. This interface assumes that integer variables are bounded between -1e6 and 1e6. For real variables these bounds are -1e8 and 1e8.","category":"page"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"Other functionality such as special support for growing datasets or MPI parallelization is not currently supported via this wrapper. Additionally, constraint formulations are simply passed from their representation in JuMP/MathOptInterface to MAiNGO. As such, there is no way to make use of advanced techniques such as defining constraints that are only used for the relaxations, using special relaxations for functions used in thermodynamics and process engineering or formulating reduced space formulations.","category":"page"},{"location":"packages/MAiNGO/#Tests","page":"MAiNGO-github/MAiNGO.jl","title":"Tests","text":"","category":"section"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"A subset of test cases for MathOptInterface solvers can be run by running the script ./test/runtests.jl. 
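For example, one way to run them is via the Julia package manager, which executes test/runtests.jl in the package's test environment (a minimal sketch, assuming MAiNGO.jl and its test dependencies are installed): import Pkg\nPkg.test(\"MAiNGO\")  # runs ./test/runtests.jl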
The current release was tested in the following combinations:","category":"page"},{"location":"packages/MAiNGO/","page":"MAiNGO-github/MAiNGO.jl","title":"MAiNGO-github/MAiNGO.jl","text":"Julia 1.8.5 and MathOptInterface v1.18.0\nJulia 1.9.4 and MathOptInterface v1.23.0.","category":"page"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"EditURL = \"https://github.com/jump-dev/SDPNAL.jl/blob/00a3fa19f4e1235587948113b0b681da17f4dab5/README.md\"","category":"page"},{"location":"packages/SDPNAL/#SDPNAL.jl","page":"jump-dev/SDPNAL.jl","title":"SDPNAL.jl","text":"","category":"section"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"SDPNAL.jl is a wrapper for the SDPNALplus solver.","category":"page"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"The wrapper has two components:","category":"page"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"an exported sdpnalplus function that is a thin wrapper on top of the sdpnalplus MATLAB function\nan interface to MathOptInterface","category":"page"},{"location":"packages/SDPNAL/#Affiliation","page":"jump-dev/SDPNAL.jl","title":"Affiliation","text":"","category":"section"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"This wrapper is maintained by the JuMP community and is not an official wrapper of SDPNALplus.","category":"page"},{"location":"packages/SDPNAL/#License","page":"jump-dev/SDPNAL.jl","title":"License","text":"","category":"section"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"SDPNAL.jl is licensed under the MIT License.","category":"page"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"The underlying solver, SDPNALplus, is licensed under the Creative Commons Attribution-ShareAlike 4.0 International Public License.","category":"page"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"In addition, SDPNAL requires an installation of MATLAB, which is a closed-source commercial product for which you must obtain a license.","category":"page"},{"location":"packages/SDPNAL/#Use-with-JuMP","page":"jump-dev/SDPNAL.jl","title":"Use with JuMP","text":"","category":"section"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"To use SDPNAL with JuMP, do:","category":"page"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"using JuMP, SDPNAL\nmodel = Model(SDPNAL.Optimizer)\nset_attribute(model, \"printlevel\", 0)","category":"page"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"Note that, contrary to the implementation of other solver-independent interfaces, using SDPNAL from JuMP or MOI fully exploits the particular structures of the SDPNAL interface and does not create superfluous slack variables and equality constraints as discussed in the SDPNAL guide:","category":"page"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"A new interface is necessary to facilitate the modeling of an SDP problem for SDPNAL+ because of latter’s flexibility to directly accept inequality constraints of the form “l ≤ B(X) ≤ u”, and bound constraints of the form “L ≤ X ≤ U”. 
The flexibility can significantly simplify the generation of the data in the SDPNAL+ format as compared to what need to be done in CVX or YALMIP to reformulate them as equality constraints through introducing extra variables. In addition, the final number of equality constraints present in the data input to SDPNAL+ can also be substantially fewer than those present in CVX or YALMIP. It is important to note here that the number of equality constraints present in the generated problem data can greatly affect the computational efficiency of the solvers, especially for interior-point based solvers.","category":"page"},{"location":"packages/SDPNAL/#Installation","page":"jump-dev/SDPNAL.jl","title":"Installation","text":"","category":"section"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"First, make sure that you satisfy the requirements of the MATLAB.jl Julia package, and that the SDPNALplus software is installed in your MATLAB™ installation.","category":"page"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"Then, install SDPNAL.jl using Pkg.add:","category":"page"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"import Pkg\nPkg.add(\"SDPNAL\")","category":"page"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"There is a startup.m file at the root of the SDPNAL folder. This adds all subdirectories recursively when MATLAB starts. However, the interface directory contains a .git subdirectory which contains a very large number of files. Because of this, MATLAB crashes if SDPNAL is in its path because the startup.m requests MATLAB to try to parse all the files in the .git folder. 
To resolve this problem, delete the startup.m file and .git folder, and add the subdirectories manually to your toolbox/local/pathdef.m file as follows:","category":"page"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"function p = pathdef\n\n% (...)\n\np = [...\n%%% BEGIN ENTRIES %%%\n'/path/to/SDPNALv1.0:', ...\n'/path/to/SDPNALv1.0/interface:', ...\n'/path/to/SDPNALv1.0/mexfun:', ...\n'/path/to/SDPNALv1.0/solver:', ...\n'/path/to/SDPNALv1.0/solver_main_default:', ...\n'/path/to/SDPNALv1.0/util:', ...\n% (...)","category":"page"},{"location":"packages/SDPNAL/","page":"jump-dev/SDPNAL.jl","title":"jump-dev/SDPNAL.jl","text":"If you have SDPT3 in addition to SDPNAL in the MATLAB path (that is, the toolbox/local/pathdef.m file) then you might have issues because both solvers define a validate function, and this might make SDPNAL call SDPT3's validate function instead of SDPNAL's validate function.","category":"page"},{"location":"should_i_use/#Should-you-use-JuMP?","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"","category":"section"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"JuMP is an algebraic modeling language for mathematical optimization written in the Julia language.","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"This page explains when you should consider using JuMP, and importantly, when you should not use JuMP.","category":"page"},{"location":"should_i_use/#When-should-you-use-JuMP?","page":"Should you use JuMP?","title":"When should you use JuMP?","text":"","category":"section"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"You should use JuMP if you have a constrained optimization problem that is formulated using the language of mathematical programming, that is, the problem has:","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"a set of real- or complex-valued decision variables\na scalar- or vector-valued real objective function\na set of constraints.","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"Key reasons to use JuMP include:","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"User friendliness\nJuMP has syntax that mimics natural mathematical expressions. (See the section on algebraic modeling languages.)\nSolver independence\nJuMP uses a generic solver-independent interface provided by the MathOptInterface package, making it easy to change between a number of open-source and commercial optimization software packages (\"solvers\"). The Supported solvers section contains a table of the currently supported solvers.\nEase of embedding\nJuMP itself is written purely in Julia. Solvers are the only binary dependencies.\nJuMP provides automatic installation of most solvers.\nBecause it is embedded in a general-purpose programming language, JuMP makes it easy to solve optimization problems as part of a larger workflow, for example, inside a simulation, behind a web server, or as a subproblem in a decomposition algorithm. 
As a trade-off, JuMP's syntax is constrained by the syntax and functionality available in Julia.\nJuMP is MPL licensed, meaning that it can be embedded in commercial software that complies with the terms of the license.\nSpeed\nBenchmarking has shown that JuMP can create problems at similar speeds to special-purpose modeling languages such as AMPL.\nJuMP communicates with most solvers in memory, avoiding the need to write intermediary files.\nAccess to advanced algorithmic techniques\nJuMP supports efficient in-memory re-solves of models.\nJuMP provides access to solver-independent and solver-dependent Callbacks.","category":"page"},{"location":"should_i_use/#When-should-you-not-use-JuMP?","page":"Should you use JuMP?","title":"When should you not use JuMP?","text":"","category":"section"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"JuMP supports a broad range of optimization classes. However, there are still some that it doesn't support, or that are better supported by other software packages.","category":"page"},{"location":"should_i_use/#You-want-to-optimize-a-complicated-Julia-function","page":"Should you use JuMP?","title":"You want to optimize a complicated Julia function","text":"","category":"section"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"Packages in Julia compose well. It's common for people to pick two unrelated packages and use them in conjunction to create novel behavior. JuMP isn't one of those packages.","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"If you want to optimize an ordinary differential equation from DifferentialEquations.jl or tune a neural network from Flux.jl, consider using other packages such as:","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"Optim.jl\nOptimization.jl\nNLPModels.jl\nNonconvex.jl","category":"page"},{"location":"should_i_use/#Black-box,-derivative-free,-or-unconstrained-optimization","page":"Should you use JuMP?","title":"Black-box, derivative free, or unconstrained optimization","text":"","category":"section"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"JuMP does support nonlinear programs with constraints and objectives containing user-defined operators. However, the functions must be automatically differentiable, or need to provide explicit derivatives. (See User-defined operators for more information.)","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"If your function is a black-box that is non-differentiable (for example, it is the output of a simulation written in C++), JuMP is not the right tool for the job. 
This also applies if you want to use a derivative free method.","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"Even if your problem is differentiable, if it is unconstrained there is limited benefit (and downsides in the form of more overhead) to using JuMP over tools which are only concerned with function minimization.","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"Alternatives to consider are:","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"Optim.jl\nOptimization.jl\nNLopt.jl","category":"page"},{"location":"should_i_use/#Disciplined-convex-programming","page":"Should you use JuMP?","title":"Disciplined convex programming","text":"","category":"section"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"JuMP does not support disciplined convex programming (DCP).","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"Alternatives to consider are:","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"Convex.jl\nCVXPY [Python]\nYALMIP [MATLAB]","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"note: Note\nConvex.jl is also built on MathOptInterface, and shares the same set of underlying solvers. However, you input problems differently, and Convex.jl checks that the problem is DCP.","category":"page"},{"location":"should_i_use/#Stochastic-programming","page":"Should you use JuMP?","title":"Stochastic programming","text":"","category":"section"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"JuMP requires deterministic input data.","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"If you have stochastic input data, consider using a JuMP extension such as:","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"InfiniteOpt.jl\nStochasticPrograms.jl\nSDDP.jl","category":"page"},{"location":"should_i_use/#Polyhedral-computations","page":"Should you use JuMP?","title":"Polyhedral computations","text":"","category":"section"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"JuMP does not provide tools for working with the polyhedron formed by the set of linear constraints.","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"Alternatives to consider are:","category":"page"},{"location":"should_i_use/","page":"Should you use JuMP?","title":"Should you use JuMP?","text":"Polyhedra.jl (See the documentation to create a polyhedron from a JuMP model.)","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"EditURL = \"design_patterns_for_larger_models.jl\"","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/#Design-patterns-for-larger-models","page":"Design patterns for larger models","title":"Design patterns for larger 
models","text":"","category":"section"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"JuMP makes it easy to build and solve optimization models. However, once you start to construct larger models, and especially ones that interact with external data sources or have customizable sets of variables and constraints based on client choices, you may find that your scripts become unwieldy. This tutorial demonstrates a variety of ways in which you can structure larger JuMP models to improve their readability and maintainability.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"tip: Tip\nThis tutorial is more advanced than the other \"Getting started\" tutorials. It's in the \"Getting started\" section to give you an early preview of how JuMP makes it easy to structure larger models. However, if you are new to JuMP you may want to briefly skim the tutorial, and come back to it once you have written a few JuMP models.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/#Overview","page":"Design patterns for larger models","title":"Overview","text":"","category":"section"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"This tutorial uses explanation-by-example. 
We're going to start with a simple knapsack model, and then expand it to add various features and structure.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/#A-simple-script","page":"Design patterns for larger models","title":"A simple script","text":"","category":"section"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"Your first prototype of a JuMP model is probably a script that uses a small set of hard-coded data.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"using JuMP, HiGHS\nprofit = [5, 3, 2, 7, 4]\nweight = [2, 8, 4, 2, 5]\ncapacity = 10\nN = 5\nmodel = Model(HiGHS.Optimizer)\n@variable(model, x[1:N], Bin)\n@objective(model, Max, sum(profit[i] * x[i] for i in 1:N))\n@constraint(model, sum(weight[i] * x[i] for i in 1:N) <= capacity)\noptimize!(model)\n@assert is_solved_and_feasible(model)\nvalue.(x)","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"The benefits of this approach are:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"it is quick to code\nit is quick to make changes.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"The downsides include:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"all variables are global (read Performance tips)\nit is easy to introduce errors, for example, having profit and weight be vectors of different lengths, or not match N\nthe solution, x[i], is hard to interpret without knowing the order in which we provided the data.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/#Wrap-the-model-in-a-function","page":"Design patterns for larger models","title":"Wrap the model in a function","text":"","category":"section"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"A good next step is to wrap your model in a function. 
This is useful for a few reasons:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"it removes global variables\nit encapsulates the JuMP model and forces you to clarify your inputs and outputs\nwe can add some error checking.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"function solve_knapsack_1(profit::Vector, weight::Vector, capacity::Real)\n if length(profit) != length(weight)\n throw(DimensionMismatch(\"profit and weight are different sizes\"))\n end\n N = length(weight)\n model = Model(HiGHS.Optimizer)\n @variable(model, x[1:N], Bin)\n @objective(model, Max, sum(profit[i] * x[i] for i in 1:N))\n @constraint(model, sum(weight[i] * x[i] for i in 1:N) <= capacity)\n optimize!(model)\n @assert is_solved_and_feasible(model)\n return value.(x)\nend\n\nsolve_knapsack_1([5, 3, 2, 7, 4], [2, 8, 4, 2, 5], 10)","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/#Create-better-data-structures","page":"Design patterns for larger models","title":"Create better data structures","text":"","category":"section"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"Although we can check for errors like mis-matched vector lengths, if you start to develop models with a lot of data, keeping track of vectors and lengths and indices is fragile and a common source of bugs. A good solution is to use Julia's type system to create an abstraction over your data.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"For example, we can create a struct that represents a single object, with a constructor that lets us validate assumptions on the input data:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"struct KnapsackObject\n profit::Float64\n weight::Float64\n function KnapsackObject(profit::Float64, weight::Float64)\n if weight < 0\n throw(DomainError(\"Weight of object cannot be negative\"))\n end\n return new(profit, weight)\n end\nend","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"as well as a struct that holds a dictionary of objects and the knapsack's capacity:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"struct KnapsackData\n objects::Dict{String,KnapsackObject}\n capacity::Float64\nend","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"Here's what our data might look like now:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"objects = 
Dict(\n \"apple\" => KnapsackObject(5.0, 2.0),\n \"banana\" => KnapsackObject(3.0, 8.0),\n \"cherry\" => KnapsackObject(2.0, 4.0),\n \"date\" => KnapsackObject(7.0, 2.0),\n \"eggplant\" => KnapsackObject(4.0, 5.0),\n)\ndata = KnapsackData(objects, 10.0)","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"If you want, you can add custom printing to make it easier to visualize:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"function Base.show(io::IO, data::KnapsackData)\n println(io, \"A knapsack with capacity $(data.capacity) and possible items:\")\n for (k, v) in data.objects\n println(\n io,\n \" $(rpad(k, 8)) : profit = $(v.profit), weight = $(v.weight)\",\n )\n end\n return\nend\n\ndata","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"Then, we can re-write our solve_knapsack function to take our KnapsackData as input:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"function solve_knapsack_2(data::KnapsackData)\n model = Model(HiGHS.Optimizer)\n @variable(model, x[keys(data.objects)], Bin)\n @objective(model, Max, sum(v.profit * x[k] for (k, v) in data.objects))\n @constraint(\n model,\n sum(v.weight * x[k] for (k, v) in data.objects) <= data.capacity,\n )\n optimize!(model)\n @assert is_solved_and_feasible(model)\n return value.(x)\nend\n\nsolve_knapsack_2(data)","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/#Read-in-data-from-files","page":"Design patterns for larger models","title":"Read in data from files","text":"","category":"section"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"Having a data structure is a good step. But it is still annoying that we have to hard-code the data into Julia. 
A good next step is to separate the data into an external file format; JSON is a common choice.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"json_data = \"\"\"\n{\n \"objects\": {\n \"apple\": {\"profit\": 5.0, \"weight\": 2.0},\n \"banana\": {\"profit\": 3.0, \"weight\": 8.0},\n \"cherry\": {\"profit\": 2.0, \"weight\": 4.0},\n \"date\": {\"profit\": 7.0, \"weight\": 2.0},\n \"eggplant\": {\"profit\": 4.0, \"weight\": 5.0}\n },\n \"capacity\": 10.0\n}\n\"\"\"\ntemp_dir = mktempdir()\nknapsack_json_filename = joinpath(temp_dir, \"knapsack.json\")\n# Instead of writing a new file here you could replace `knapsack_json_filename`\n# with the path to a local file.\nwrite(knapsack_json_filename, json_data);\nnothing #hide","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"Now let's write a function that reads this file and builds a KnapsackData object:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"import JSON\n\nfunction read_data(filename)\n d = JSON.parsefile(filename)\n return KnapsackData(\n Dict(\n k => KnapsackObject(v[\"profit\"], v[\"weight\"]) for\n (k, v) in d[\"objects\"]\n ),\n d[\"capacity\"],\n )\nend\n\ndata = read_data(knapsack_json_filename)","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/#Add-options-via-if-else","page":"Design patterns for larger models","title":"Add options via if-else","text":"","category":"section"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"At this point, we have data in a file format which we can load and solve a single problem. For many users, this might be sufficient. However, at some point you may be asked to add features like \"but what if we want to take more than one of a particular item?\"","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"If this is the first time that you've been asked to add a feature, adding options via if-else statements is a good approach. 
For example, we might write:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"function solve_knapsack_3(data::KnapsackData; binary_knapsack::Bool)\n model = Model(HiGHS.Optimizer)\n if binary_knapsack\n @variable(model, x[keys(data.objects)], Bin)\n else\n @variable(model, x[keys(data.objects)] >= 0, Int)\n end\n @objective(model, Max, sum(v.profit * x[k] for (k, v) in data.objects))\n @constraint(\n model,\n sum(v.weight * x[k] for (k, v) in data.objects) <= data.capacity,\n )\n optimize!(model)\n @assert is_solved_and_feasible(model)\n return value.(x)\nend","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"Now we can solve the binary knapsack:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"solve_knapsack_3(data; binary_knapsack = true)","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"And an integer knapsack where we can take more than one copy of each item:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"solve_knapsack_3(data; binary_knapsack = false)","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/#Add-configuration-options-via-dispatch","page":"Design patterns for larger models","title":"Add configuration options via dispatch","text":"","category":"section"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"If you get repeated requests to add different options, you'll quickly find yourself in a mess of different flags and if-else statements. It's hard to write, hard to read, and hard to ensure you haven't introduced any bugs. A good solution is to use Julia's type dispatch to control the configuration of the model. The easiest way to explain this is by example.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"First, start by defining a new abstract type, as well as new subtypes for each of our options. 
These types are going to control the configuration of the knapsack model.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"abstract type AbstractConfiguration end\n\nstruct BinaryKnapsackConfig <: AbstractConfiguration end\n\nstruct IntegerKnapsackConfig <: AbstractConfiguration end","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"Then, we rewrite our solve_knapsack function to take a config argument, and we introduce an add_knapsack_variables function to abstract the creation of our variables.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"function solve_knapsack_4(data::KnapsackData, config::AbstractConfiguration)\n model = Model(HiGHS.Optimizer)\n x = add_knapsack_variables(model, data, config)\n @objective(model, Max, sum(v.profit * x[k] for (k, v) in data.objects))\n @constraint(\n model,\n sum(v.weight * x[k] for (k, v) in data.objects) <= data.capacity,\n )\n optimize!(model)\n @assert is_solved_and_feasible(model)\n return value.(x)\nend","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"For the binary knapsack problem, add_knapsack_variables looks like this:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"function add_knapsack_variables(\n model::Model,\n data::KnapsackData,\n ::BinaryKnapsackConfig,\n)\n return @variable(model, x[keys(data.objects)], Bin)\nend","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"For the integer knapsack problem, add_knapsack_variables looks like this:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"function add_knapsack_variables(\n model::Model,\n data::KnapsackData,\n ::IntegerKnapsackConfig,\n)\n return @variable(model, x[keys(data.objects)] >= 0, Int)\nend","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"Now we can solve the binary knapsack:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"solve_knapsack_4(data, BinaryKnapsackConfig())","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"and the integer knapsack problem:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"solve_knapsack_4(data, 
IntegerKnapsackConfig())","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"The main benefit of the dispatch approach is that you can quickly add new options without needing to modify the existing code. For example:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"struct UpperBoundedKnapsackConfig <: AbstractConfiguration\n limit::Int\nend\n\nfunction add_knapsack_variables(\n model::Model,\n data::KnapsackData,\n config::UpperBoundedKnapsackConfig,\n)\n return @variable(model, 0 <= x[keys(data.objects)] <= config.limit, Int)\nend\n\nsolve_knapsack_4(data, UpperBoundedKnapsackConfig(3))","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/#Generalize-constraints-and-objectives","page":"Design patterns for larger models","title":"Generalize constraints and objectives","text":"","category":"section"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"It's easy to extend the dispatch approach to constraints and objectives as well. The key points to notice in the next two functions are that:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"we can access registered variables via model[:x]\nwe can define generic functions which accept any AbstractConfiguration as a configuration argument. That means we can implement a single method and have it apply to multiple configuration types.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"function add_knapsack_constraints(\n model::Model,\n data::KnapsackData,\n ::AbstractConfiguration,\n)\n x = model[:x]\n @constraint(\n model,\n capacity_constraint,\n sum(v.weight * x[k] for (k, v) in data.objects) <= data.capacity,\n )\n return\nend\n\nfunction add_knapsack_objective(\n model::Model,\n data::KnapsackData,\n ::AbstractConfiguration,\n)\n x = model[:x]\n @objective(model, Max, sum(v.profit * x[k] for (k, v) in data.objects))\n return\nend\n\nfunction solve_knapsack_5(data::KnapsackData, config::AbstractConfiguration)\n model = Model(HiGHS.Optimizer)\n add_knapsack_variables(model, data, config)\n add_knapsack_constraints(model, data, config)\n add_knapsack_objective(model, data, config)\n optimize!(model)\n @assert is_solved_and_feasible(model)\n return value.(model[:x])\nend\n\nsolve_knapsack_5(data, BinaryKnapsackConfig())","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/#Remove-solver-dependence,-add-error-checks","page":"Design patterns for larger models","title":"Remove solver dependence, add error checks","text":"","category":"section"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"Compared to where we started, our knapsack model is now significantly different. 
We've wrapped it in a function, defined some data types, and introduced configuration options to control the variables and constraints that get added. There are a few other steps we can do to further improve things:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"remove the dependence on HiGHS\nadd checks that we found an optimal solution\nadd a helper function to avoid the need to explicitly construct the data.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"function solve_knapsack_6(\n optimizer,\n data::KnapsackData,\n config::AbstractConfiguration,\n)\n model = Model(optimizer)\n add_knapsack_variables(model, data, config)\n add_knapsack_constraints(model, data, config)\n add_knapsack_objective(model, data, config)\n optimize!(model)\n if !is_solved_and_feasible(model)\n @warn(\"Model not solved to optimality\")\n return nothing\n end\n return value.(model[:x])\nend\n\nfunction solve_knapsack_6(\n optimizer,\n data::String,\n config::AbstractConfiguration,\n)\n return solve_knapsack_6(optimizer, read_data(data), config)\nend\n\nsolution = solve_knapsack_6(\n HiGHS.Optimizer,\n knapsack_json_filename,\n BinaryKnapsackConfig(),\n)","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/#Create-a-module","page":"Design patterns for larger models","title":"Create a module","text":"","category":"section"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"Now we're ready to expose our model to the wider world. That might be as part of a larger Julia project that we're contributing to, or as a stand-alone script that we can run on-demand. In either case, it's good practice to wrap everything in a module. 
This further encapsulates our code into a single namespace, and we can add documentation in the form of docstrings.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"Some good rules to follow when creating a module are:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"use import in a module instead of using to make it clear which functions are from which packages\nuse _ to start function and type names that are considered private\nadd docstrings to all public variables and functions.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"module KnapsackModel\n\nimport JuMP\nimport JSON\n\nstruct _KnapsackObject\n profit::Float64\n weight::Float64\n function _KnapsackObject(profit::Float64, weight::Float64)\n if weight < 0\n throw(DomainError(\"Weight of object cannot be negative\"))\n end\n return new(profit, weight)\n end\nend\n\nstruct _KnapsackData\n objects::Dict{String,_KnapsackObject}\n capacity::Float64\nend\n\nfunction _read_data(filename)\n d = JSON.parsefile(filename)\n return _KnapsackData(\n Dict(\n k => _KnapsackObject(v[\"profit\"], v[\"weight\"]) for\n (k, v) in d[\"objects\"]\n ),\n d[\"capacity\"],\n )\nend\n\nabstract type _AbstractConfiguration end\n\n\"\"\"\n BinaryKnapsackConfig()\n\nCreate a binary knapsack problem where each object can be taken 0 or 1 times.\n\"\"\"\nstruct BinaryKnapsackConfig <: _AbstractConfiguration end\n\n\"\"\"\n IntegerKnapsackConfig()\n\nCreate an integer knapsack problem where each object can be taken any number of\ntimes.\n\"\"\"\nstruct IntegerKnapsackConfig <: _AbstractConfiguration end\n\nfunction _add_knapsack_variables(\n model::JuMP.Model,\n data::_KnapsackData,\n ::BinaryKnapsackConfig,\n)\n return JuMP.@variable(model, x[keys(data.objects)], Bin)\nend\n\nfunction _add_knapsack_variables(\n model::JuMP.Model,\n data::_KnapsackData,\n ::IntegerKnapsackConfig,\n)\n return JuMP.@variable(model, x[keys(data.objects)] >= 0, Int)\nend\n\nfunction _add_knapsack_constraints(\n model::JuMP.Model,\n data::_KnapsackData,\n ::_AbstractConfiguration,\n)\n x = model[:x]\n JuMP.@constraint(\n model,\n capacity_constraint,\n sum(v.weight * x[k] for (k, v) in data.objects) <= data.capacity,\n )\n return\nend\n\nfunction _add_knapsack_objective(\n model::JuMP.Model,\n data::_KnapsackData,\n ::_AbstractConfiguration,\n)\n x = model[:x]\n JuMP.@objective(model, Max, sum(v.profit * x[k] for (k, v) in data.objects))\n return\nend\n\nfunction _solve_knapsack(\n optimizer,\n data::_KnapsackData,\n config::_AbstractConfiguration,\n)\n model = JuMP.Model(optimizer)\n _add_knapsack_variables(model, data, config)\n _add_knapsack_constraints(model, data, config)\n _add_knapsack_objective(model, data, config)\n JuMP.optimize!(model)\n if !JuMP.is_solved_and_feasible(model)\n @warn(\"Model not solved to optimality\")\n return nothing\n end\n return JuMP.value.(model[:x])\nend\n\n\"\"\"\n solve_knapsack(\n optimizer,\n knapsack_json_filename::String,\n config::_AbstractConfiguration,\n )\n\nSolve the knapsack problem and return the optimal primal solution\n\n# Arguments\n\n * `optimizer` : an object that can be passed to `JuMP.Model` to construct a new\n 
JuMP model.\n * `knapsack_json_filename` : the filename of a JSON file containing the data for the\n problem.\n * `config` : an object to control the type of knapsack model constructed.\n Valid options are:\n * `BinaryKnapsackConfig()`\n * `IntegerKnapsackConfig()`\n\n# Returns\n\n * If an optimal solution exists: a `JuMP.DenseAxisArray` that maps the `String`\n name of each object to the number of objects to pack into the knapsack.\n * Otherwise, `nothing`, indicating that the problem does not have an optimal\n solution.\n\n# Example\n\n```julia\nsolution = solve_knapsack(\n HiGHS.Optimizer,\n \"path/to/data.json\",\n BinaryKnapsackConfig(),\n)\n```\n\n```julia\nsolution = solve_knapsack(\n MOI.OptimizerWithAttributes(HiGHS.Optimizer, \"output_flag\" => false),\n \"path/to/data.json\",\n IntegerKnapsackConfig(),\n)\n```\n\"\"\"\nfunction solve_knapsack(\n optimizer,\n knapsack_json_filename::String,\n config::_AbstractConfiguration,\n)\n data = _read_data(knapsack_json_filename)\n return _solve_knapsack(optimizer, data, config)\nend\n\nend","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"Finally, you can call your model:","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"import .KnapsackModel\n\nKnapsackModel.solve_knapsack(\n HiGHS.Optimizer,\n knapsack_json_filename,\n KnapsackModel.BinaryKnapsackConfig(),\n)","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"note: Note\nThe . in .KnapsackModel denotes that it is a submodule and not a separate package that we installed with Pkg.add. If you put the KnapsackModel in a separate file, load it with:include(\"path/to/KnapsackModel.jl\")\nimport .KnapsackModel","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/#Add-tests","page":"Design patterns for larger models","title":"Add tests","text":"","category":"section"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"As a final step, you should add tests for your model. This often means testing on a small problem for which you can work out the optimal solution by hand. 
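For the five-object data set used throughout this tutorial, working the answer out by hand is straightforward: with a capacity of 10, the best binary knapsack takes the apple, date, and eggplant (weight 2 + 2 + 5 = 9) for a profit of 5 + 7 + 4 = 16, while the integer knapsack does best with five dates (weight 5 × 2 = 10) for a profit of 5 × 7 = 35. These are exactly the solutions checked in the tests below.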
The Julia standard library Test has good unit-testing functionality.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"import .KnapsackModel\nusing Test\n\n@testset \"KnapsackModel\" begin\n @testset \"feasible_binary_knapsack\" begin\n x = KnapsackModel.solve_knapsack(\n HiGHS.Optimizer,\n knapsack_json_filename,\n KnapsackModel.BinaryKnapsackConfig(),\n )\n @test isapprox(x[\"apple\"], 1, atol = 1e-5)\n @test isapprox(x[\"banana\"], 0, atol = 1e-5)\n @test isapprox(x[\"cherry\"], 0, atol = 1e-5)\n @test isapprox(x[\"date\"], 1, atol = 1e-5)\n @test isapprox(x[\"eggplant\"], 1, atol = 1e-5)\n end\n @testset \"feasible_integer_knapsack\" begin\n x = KnapsackModel.solve_knapsack(\n HiGHS.Optimizer,\n knapsack_json_filename,\n KnapsackModel.IntegerKnapsackConfig(),\n )\n @test isapprox(x[\"apple\"], 0, atol = 1e-5)\n @test isapprox(x[\"banana\"], 0, atol = 1e-5)\n @test isapprox(x[\"cherry\"], 0, atol = 1e-5)\n @test isapprox(x[\"date\"], 5, atol = 1e-5)\n @test isapprox(x[\"eggplant\"], 0, atol = 1e-5)\n end\n @testset \"infeasible_binary_knapsack\" begin\n dir = mktempdir()\n infeasible_filename = joinpath(dir, \"infeasible.json\")\n write(\n infeasible_filename,\n \"\"\"{\n \"objects\": {\n \"apple\": {\"profit\": 5.0, \"weight\": 2.0},\n \"banana\": {\"profit\": 3.0, \"weight\": 8.0},\n \"cherry\": {\"profit\": 2.0, \"weight\": 4.0},\n \"date\": {\"profit\": 7.0, \"weight\": 2.0},\n \"eggplant\": {\"profit\": 4.0, \"weight\": 5.0}\n },\n \"capacity\": -10.0\n }\"\"\",\n )\n x = KnapsackModel.solve_knapsack(\n HiGHS.Optimizer,\n infeasible_filename,\n KnapsackModel.BinaryKnapsackConfig(),\n )\n @test x === nothing\n end\nend","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"tip: Tip\nPlace these tests in a separate file test_knapsack_model.jl so that you can run the tests by adding include(\"test_knapsack_model.jl\") to any file where needed.","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/#Next-steps","page":"Design patterns for larger models","title":"Next steps","text":"","category":"section"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"We've only briefly scratched the surface of ways to create and structure large JuMP models, so consider this tutorial a starting point, rather than a comprehensive list of all the possible ways to structure JuMP models. 
If you are embarking on a large project that uses JuMP, a good next step is to look at ways people have written large JuMP projects \"in the wild.\"","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"Here are some good examples (all co-incidentally related to energy):","category":"page"},{"location":"tutorials/getting_started/design_patterns_for_larger_models/","page":"Design patterns for larger models","title":"Design patterns for larger models","text":"AnyMOD.jl\nJuMP-dev 2021 talk\nsource code\nPowerModels.jl\nJuMP-dev 2021 talk\nsource code\nPowerSimulations.jl\nJuliaCon 2021 talk\nsource code\nUnitCommitment.jl\nJuMP-dev 2021 talk\nsource code","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"EditURL = \"multi.jl\"","category":"page"},{"location":"tutorials/linear/multi/#The-multi-commodity-flow-problem","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"","category":"section"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"This tutorial was originally contributed by Louis Luangkesorn.","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"This tutorial is a JuMP implementation of the multi-commodity transportation model described in AMPL: A Modeling Language for Mathematical Programming, by R. Fourer, D.M. Gay and B.W. Kernighan.","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"The purpose of this tutorial is to demonstrate creating a JuMP model from an SQLite database.","category":"page"},{"location":"tutorials/linear/multi/#Required-packages","page":"The multi-commodity flow problem","title":"Required packages","text":"","category":"section"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"This tutorial uses the following packages","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"using JuMP\nimport DataFrames\nimport HiGHS\nimport SQLite\nimport Tables\nimport Test\n\nconst DBInterface = SQLite.DBInterface","category":"page"},{"location":"tutorials/linear/multi/#Formulation","page":"The multi-commodity flow problem","title":"Formulation","text":"","category":"section"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"The multi-commodity flow problem is a simple extension of The transportation problem to multiple types of products. 
Briefly, we start with the formulation of the transportation problem:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"\begin{aligned}\n\min && \sum_{i \in O, j \in D} c_{i,j} x_{i,j} \\\ns.t. && \sum_{j \in D} x_{i,j} \le s_i && \forall i \in O \\\n && \sum_{i \in O} x_{i,j} = d_j && \forall j \in D \\\n && x_{i,j} \ge 0 && \forall i \in O, j \in D\n\end{aligned}","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"but introduce a set of products P, resulting in:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"\begin{aligned}\n\min && \sum_{i \in O, j \in D, k \in P} c_{i,j,k} x_{i,j,k} \\\ns.t. && \sum_{j \in D} x_{i,j,k} \le s_{i,k} && \forall i \in O, k \in P \\\n && \sum_{i \in O} x_{i,j,k} = d_{j,k} && \forall j \in D, k \in P \\\n && x_{i,j,k} \ge 0 && \forall i \in O, j \in D, k \in P \\\n && \sum_{k \in P} x_{i,j,k} \le u_{i,j} && \forall i \in O, j \in D\n\end{aligned}","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"Note that the last constraint is new; it says that there is a maximum quantity of goods (of any type) that can be transported from origin i to destination j.","category":"page"},{"location":"tutorials/linear/multi/#Data","page":"The multi-commodity flow problem","title":"Data","text":"","category":"section"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"For the purpose of this tutorial, the JuMP repository contains an example database called multi.sqlite.","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"filename = joinpath(@__DIR__, \"multi.sqlite\");\nnothing #hide","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"To run locally, download multi.sqlite and update filename appropriately.","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"Load the database using SQLite.DB:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"db = SQLite.DB(filename)","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"A quick way to see the schema of the database is via SQLite.tables:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"SQLite.tables(db)","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"We interact with the database by executing queries, and then piping the results to an appropriate table. 
One example is a DataFrame:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"DBInterface.execute(db, \"SELECT * FROM locations\") |> DataFrames.DataFrame","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"But other table types are supported, such as Tables.rowtable:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"DBInterface.execute(db, \"SELECT * FROM locations\") |> Tables.rowtable","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"A rowtable is a Vector of NamedTuples.","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"You can construct more complicated SQL queries:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"origins =\n DBInterface.execute(\n db,\n \"SELECT location FROM locations WHERE type = \\\"origin\\\"\",\n ) |> Tables.rowtable","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"But for our purpose, we just want the list of strings:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"origins = map(y -> y.location, origins)","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"We can compose these two operations to get a list of destinations:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"destinations =\n DBInterface.execute(\n db,\n \"SELECT location FROM locations WHERE type = \\\"destination\\\"\",\n ) |>\n Tables.rowtable |>\n x -> map(y -> y.location, x)","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"And a list of products from our products table:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"products =\n DBInterface.execute(db, \"SELECT product FROM products\") |>\n Tables.rowtable |>\n x -> map(y -> y.product, x)","category":"page"},{"location":"tutorials/linear/multi/#JuMP-formulation","page":"The multi-commodity flow problem","title":"JuMP formulation","text":"","category":"section"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"We start by creating a model and our decision variables:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"model = Model(HiGHS.Optimizer)\nset_silent(model)\n@variable(model, x[origins, destinations, products] >= 0)","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"One 
approach when working with databases is to extract all of the data into a Julia data structure. For example, let's pull the cost table into a DataFrame and then construct our objective by iterating over the rows of the DataFrame:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"cost = DBInterface.execute(db, \"SELECT * FROM cost\") |> DataFrames.DataFrame\n@objective(\n model,\n Max,\n sum(r.cost * x[r.origin, r.destination, r.product] for r in eachrow(cost)),\n);\nnothing #hide","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"If we don't want to use a DataFrame, we can use a Tables.rowtable instead:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"supply = DBInterface.execute(db, \"SELECT * FROM supply\") |> Tables.rowtable\nfor r in supply\n @constraint(model, sum(x[r.origin, :, r.product]) <= r.supply)\nend","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"Another approach is to execute the query and then iterate through its rows using Tables.rows:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"demand = DBInterface.execute(db, \"SELECT * FROM demand\")\nfor r in Tables.rows(demand)\n @constraint(model, sum(x[:, r.destination, r.product]) == r.demand)\nend","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"warning: Warning\nIterating through the rows of a query result works by incrementing a cursor inside the database. As a consequence, you cannot call Tables.rows twice on the same query result.","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"The SQLite queries can be arbitrarily complex. 
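Queries can also be parameterized. As a sketch that is not part of the original tutorial (it assumes the db connection created above and uses the generic DBInterface.prepare and two-argument DBInterface.execute calls), a prepared statement with a ? placeholder avoids interpolating Julia values into the SQL string:\n\n# Hypothetical example: prepare the statement once, then execute it with a bound value.\nstmt = DBInterface.prepare(db, \"SELECT location FROM locations WHERE type = ?\")\nDBInterface.execute(stmt, [\"origin\"]) |> Tables.rowtable\n\nThis should return the same rows as the origins query shown earlier. 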
As a more involved example, here's a query that builds every possible origin-destination pair:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"od_pairs = DBInterface.execute(\n db,\n \"\"\"\n SELECT a.location as 'origin',\n b.location as 'destination'\n FROM locations a\n INNER JOIN locations b\n ON a.type = 'origin' AND b.type = 'destination'\n \"\"\",\n)","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"We then add a constraint that we cannot send more than 625 units between each pair:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"for r in Tables.rows(od_pairs)\n @constraint(model, sum(x[r.origin, r.destination, :]) <= 625)\nend","category":"page"},{"location":"tutorials/linear/multi/#Solution","page":"The multi-commodity flow problem","title":"Solution","text":"","category":"section"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"Finally, we can optimize the model:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"optimize!(model)\nTest.@test is_solved_and_feasible(model)\nsolution_summary(model)","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"and print the solution:","category":"page"},{"location":"tutorials/linear/multi/","page":"The multi-commodity flow problem","title":"The multi-commodity flow problem","text":"begin\n println(\" \", join(products, ' '))\n for o in origins, d in destinations\n v = lpad.([round(Int, value(x[o, d, p])) for p in products], 5)\n println(o, \" \", d, \" \", join(replace.(v, \" 0\" => \" . \"), \" \"))\n end\nend","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"EditURL = \"finance.jl\"","category":"page"},{"location":"tutorials/linear/finance/#Financial-modeling-problems","page":"Financial modeling problems","title":"Financial modeling problems","text":"","category":"section"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"This tutorial was generated using Literate.jl. Download the source as a .jl file.","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"This tutorial was originally contributed by Arpit Bhatia.","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"Optimization models play an increasingly important role in financial decisions. 
Many computational finance problems can be solved efficiently using modern optimization techniques.","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"In this tutorial we will discuss two such examples taken from (Cornuéjols et al., 2018).","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"This tutorial uses the following packages","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"using JuMP\nimport HiGHS","category":"page"},{"location":"tutorials/linear/finance/#Short-term-financing","page":"Financial modeling problems","title":"Short-term financing","text":"","category":"section"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"Corporations routinely face the problem of financing short-term cash commitments such as the following:","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"Month Jan Feb Mar Apr May Jun\nNet Cash Flow -150 -100 200 -200 50 300","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"Net cash flow requirements are given in thousands of dollars. The company has the following sources of funds:","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"A line of credit of up to $100K at an interest rate of 1% per month,\nIn any one of the first three months, it can issue 90-day commercial paper bearing a total interest of 2% for the 3-month period,\nExcess funds can be invested at an interest rate of 0.3% per month.","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"Our task is to find the most economical way to use these three sources of funds so that the company ends up with as much money as possible at the end of June.","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"We model this problem in the following manner:","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"We will use the following decision variables:","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"the amount u_i drawn from the line of credit in month i\nthe amount v_i of commercial paper issued in month i\nthe excess funds w_i in month i","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"Here we have three types of constraints:","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"cash inflow = cash outflow for each month\nupper bounds on u_i\nnonnegativity of the decision variables u_i, v_i and w_i.","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"Our objective will be to 
maximize the company's wealth in June, which we represent by the variable m.","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"financing = Model(HiGHS.Optimizer)\n\n@variables(financing, begin\n 0 <= u[1:5] <= 100\n 0 <= v[1:3]\n 0 <= w[1:5]\n m\nend)\n\n@objective(financing, Max, m)\n\n@constraints(\n financing,\n begin\n u[1] + v[1] - w[1] == 150 # January\n u[2] + v[2] - w[2] - 1.01u[1] + 1.003w[1] == 100 # February\n u[3] + v[3] - w[3] - 1.01u[2] + 1.003w[2] == -200 # March\n u[4] - w[4] - 1.02v[1] - 1.01u[3] + 1.003w[3] == 200 # April\n u[5] - w[5] - 1.02v[2] - 1.01u[4] + 1.003w[4] == -50 # May\n -m - 1.02v[3] - 1.01u[5] + 1.003w[5] == -300 # June\n end\n)\n\noptimize!(financing)\n@assert is_solved_and_feasible(financing)\nobjective_value(financing)","category":"page"},{"location":"tutorials/linear/finance/#Combinatorial-auctions","page":"Financial modeling problems","title":"Combinatorial auctions","text":"","category":"section"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"In many auctions, the value that a bidder has for a set of items may not be the sum of the values that he has for individual items.","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"Examples are equity trading, electricity markets, pollution rights auctions, and auctions for airport landing slots.","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"To take this into account, combinatorial auctions allow the bidders to submit bids on combinations of items.","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"Let M = {1, 2, ..., m} be the set of items that the auctioneer has to sell. A bid is a pair B_j = (S_j, p_j), where S_j ⊆ M is a nonempty set of items and p_j is the price offered for this set.","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"Suppose that the auctioneer has received n bids B_1, B_2, ..., B_n. The goal of this problem is to help the auctioneer determine the winning bids in order to maximize revenue.","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"We model this problem with a binary decision variable y_j for every bid. We add a constraint that each item i is sold at most once. 
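For instance, with the bid data used in the code below, item 1 appears in bids 1, 4, and 6, so its constraint is y_1 + y_4 + y_6 <= 1. 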
This gives us the following model:","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"max sum_{j = 1}^{n} p_j y_j\ns.t. sum_{j : i in S_j} y_j <= 1 for all i = 1, 2, ..., m\ny_j in {0, 1} for all j = 1, 2, ..., n","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"bid_values = [6 3 12 12 8 16]\nbid_items = [[1], [2], [3 4], [1 3], [2 4], [1 3 4]]\n\nauction = Model(HiGHS.Optimizer)\n@variable(auction, y[1:6], Bin)\n@objective(auction, Max, sum(y' .* bid_values))\nfor i in 1:6\n @constraint(auction, sum(y[j] for j in 1:6 if i in bid_items[j]) <= 1)\nend\noptimize!(auction)\n@assert is_solved_and_feasible(auction)\nobjective_value(auction)","category":"page"},{"location":"tutorials/linear/finance/","page":"Financial modeling problems","title":"Financial modeling problems","text":"value.(y)","category":"page"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"EditURL = \"https://github.com/GAMS-dev/GAMS.jl/blob/e00a845d42653adf5d26c7cef1ef84fb990c91c7/README.md\"","category":"page"},{"location":"packages/GAMS/#GAMS.jl","page":"GAMS-dev/GAMS.jl","title":"GAMS.jl","text":"","category":"section"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"GAMS.jl provides a MathOptInterface Optimizer to solve JuMP models using GAMS.","category":"page"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"GAMS comes with dozens of supported solvers. Among them are: ALPHAECP, ANTIGONE, BARON, CBC, CONOPT, CPLEX, DICOPT, GUROBI, IPOPT, KNITRO, LINDO, LINDOGLOBAL, MINOS, MOSEK, NLPEC, PATH, QUADMINOS, SBB, SHOT, SCIP, SNOPT, SOPLEX, XPRESS. Find a complete list here.","category":"page"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"GAMS.jl supports the following JuMP features:","category":"page"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"linear, quadratic and nonlinear (convex and non-convex) objective and constraints\ncontinuous, binary, integer, semi-continuous and semi-integer variables\nSOS1 and SOS2 sets\ncomplementarity constraints","category":"page"},{"location":"packages/GAMS/#Installation","page":"GAMS-dev/GAMS.jl","title":"Installation","text":"","category":"section"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"Download GAMS and obtain a GAMS license. 
Please note that GAMS also offers a free community license.\n(optional) Add the GAMS system directory to the PATH variable in order to find GAMS automatically.\nInstall GAMS.jl using the Julia package manager:\nusing Pkg\nPkg.add(\"GAMS\")","category":"page"},{"location":"packages/GAMS/#Usage","page":"GAMS-dev/GAMS.jl","title":"Usage","text":"","category":"section"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"Using GAMS as the optimizer for your JuMP model:","category":"page"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"using GAMS, JuMP\nmodel = Model(GAMS.Optimizer)","category":"page"},{"location":"packages/GAMS/#GAMS-System","page":"GAMS-dev/GAMS.jl","title":"GAMS System","text":"","category":"section"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"If the GAMS system directory has been added to the PATH variable (you can check this with print(ENV[\"PATH\"])), GAMS.jl will find it automatically. Otherwise, or if you would like to switch between systems, the system directory can be specified with one of the following:","category":"page"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"set_optimizer_attribute(model, \"SysDir\", \"\")\nset_optimizer_attribute(model, GAMS.SysDir(), \"\")","category":"page"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"Analogously, you can specify a working directory with \"WorkDir\" or GAMS.WorkDir(). If no working directory has been set, GAMS.jl will create a temporary one.","category":"page"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"If you want to use the same GAMS workspace (same system and working directory) for multiple models, you can first create a GAMSWorkspace with any of the following:","category":"page"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"ws = GAMS.GAMSWorkspace()\nws = GAMS.GAMSWorkspace(\"\")\nws = GAMS.GAMSWorkspace(\"\", \"\")","category":"page"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"and then pass it to your models:","category":"page"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"model = Model(() -> GAMS.Optimizer(ws))","category":"page"},{"location":"packages/GAMS/#GAMS-Options","page":"GAMS-dev/GAMS.jl","title":"GAMS Options","text":"","category":"section"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"GAMS command line options can be specified by","category":"page"},{"location":"packages/GAMS/","page":"GAMS-dev/GAMS.jl","title":"GAMS-dev/GAMS.jl","text":"set_optimizer_attribute(model, \"