Skip to content

Commit

Permalink
Merge pull request #1 from jalving/master
Browse files Browse the repository at this point in the history
Update to Plasmo v0.3.0
  • Loading branch information
jalving authored Nov 2, 2020
2 parents ce69737 + fb03b81 commit 3e4e6fd
Show file tree
Hide file tree
Showing 4 changed files with 27 additions and 28 deletions.
1 change: 1 addition & 0 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"

[compat]
julia = "1"
Plasmo = "0.3.0"

[extras]
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
34 changes: 16 additions & 18 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,26 +1,33 @@
# PipsSolver.jl

[![Build Status](https://travis-ci.com/jalving/PipsSolver.jl.svg?branch=master)](https://travis-ci.com/jalving/PipsSolver.jl)
[![Codecov](https://codecov.io/gh/jalving/PipsSolver.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/jalving/PipsSolver.jl)

## Overview
PipsSolver.jl is a Julia interface to the [PIPS-NLP](https://github.com/Argonne-National-Laboratory/PIPS/tree/master/PIPS-NLP) nonlinear optimization solver.
Running the solver requires a working PIPS-NLP installation following the [instructions](https://github.com/Argonne-National-Laboratory/PIPS).
The PipsSolver.jl package works with the graph-based algebraic modeling package [Plasmo.jl](https://github.com/zavalab/Plasmo.jl).

## Installation
PipsSolver.jl can be installed using the following Julia Pkg command.
PipsSolver.jl can be installed using the following Julia Pkg command. Note that currently, PipsSolver.jl only works with Plasmo.jl v0.3.0.

```julia
using Pkg
# Pin Plasmo to v0.3.0 — PipsSolver.jl currently only works with this release.
Pkg.add(Pkg.PackageSpec(name="Plasmo", version="0.3.0"))
# Install PipsSolver.jl directly from the repository (not yet registered).
Pkg.add(Pkg.PackageSpec(url="https://github.com/zavalab/PipsSolver.jl.git"))
```

## Usage
```julia
using MPIClusterManagers # to import MPIManager
using Distributed # need to also import Distributed to use addprocs()
using Plasmo

#Setup worker environments
#This will load the environment specified in this script's directory onto each worker
@everywhere using Pkg
@everywhere Pkg.activate((@__DIR__))

#Load Plasmo and PipsSolver on every worker
@everywhere using Plasmo
@everywhere using PipsSolver

graph = OptiGraph()

Expand All @@ -47,21 +54,12 @@ manager=MPIManager(np=2) # specify, number of mpi workers, launch cmd, etc.
addprocs(manager) # start mpi workers and add them as julia workers too.


#Setup worker environments
#NOTE: You will need to load your Julia environment onto each worker
@everywhere using Pkg
@everywhere Pkg.activate((@__DIR__))

#Load Plasmo and PipsSolver on every worker
@everywhere using Plasmo
@everywhere using PipsSolver


julia_workers = sort(collect(values(manager.mpi2j))) # #Distribute the graph to workers

remote_references = PipsSolver.distribute(graph,julia_workers,remote_name = :pipsgraph) #create the variable pipsgraph on each worker
#Map julia workers to MPI ranks
julia_workers = sort(collect(values(manager.mpi2j)))
#Distribute the graph to workers. This creates the variable `pipsgraph` on each worker with an allocation of optinodes.
remote_references = PipsSolver.distribute(graph,julia_workers,remote_name = :pipsgraph)

# The remote modelgraphs can be queried if they are fetched from the other workers
# The remote optigraphs can be queried if they are fetched from the other workers.
r1 = fetch(remote_references[1])
r2 = fetch(remote_references[2])

Expand Down
6 changes: 3 additions & 3 deletions src/PipsNlpInterface.jl
Original file line number Diff line number Diff line change
Expand Up @@ -70,11 +70,11 @@ function getData(m::JuMP.Model)
end
end

function pipsnlp_solve(graph::ModelGraph) #Assume graph variables and constraints are first stage
function pipsnlp_solve(graph::OptiGraph) #Assume graph variables and constraints are first stage

#TODO SUBGRAPHS with linkconstraints to subnodes
if has_subgraphs(graph)
error("The PIPS-NLP does not yet support ModelGraphs with subgraphs. You will need to aggregate the graph before calling pipsnlp_solve")
error("The PIPS-NLP does not yet support OptiGraphs with subgraphs. You will need to aggregate the graph before calling pipsnlp_solve")
end

if has_NLlinkconstraints(graph)
Expand Down Expand Up @@ -711,7 +711,7 @@ function pipsnlp_solve(graph::ModelGraph) #Assume graph variables and constraint
println("PIPS-NLP time: ", time() - t1, " (s)")
end

#TODO. Put solution onto actual ModelNode
#TODO. Put solution onto actual OptiNode
for (idx,node) in enumerate(modelList) #set solution values for each model
local_data = getData(node)
if idx != 1
Expand Down
14 changes: 7 additions & 7 deletions src/distribute.jl
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
#Distribute an optigraph among workers. Each worker should have the same master model. Each worker will be allocated some of the nodes in the original optigraph
function distribute(mg::ModelGraph,to_workers::Vector{Int64};remote_name = :graph)
function distribute(mg::OptiGraph,to_workers::Vector{Int64};remote_name = :graph)
#NOTE: Linkconstraints keep their indices in new graphs, NOTE: Link constraint row index needs to match on each worker
#NOTE: Does not yet support subgraphs. Aggregate first
#Create remote channel to store the nodes we want to send
Expand All @@ -9,7 +9,7 @@ function distribute(mg::ModelGraph,to_workers::Vector{Int64};remote_name = :grap
channel_indices = RemoteChannel(1)

to_workers = sort(to_workers)

n_nodes = getnumnodes(mg)
n_workers = length(to_workers)
nodes_per_worker = Int64(floor(n_nodes/n_workers))
Expand Down Expand Up @@ -84,10 +84,10 @@ function distribute(mg::ModelGraph,to_workers::Vector{Int64};remote_name = :grap
return remote_references
end

function _create_worker_modelgraph(modelnodes::Vector{ModelNode},node_indices::Vector{Int64},n_nodes::Int64,n_linkeq_cons::Int64,n_linkineq_cons::Int64,
function _create_worker_modelgraph(modelnodes::Vector{OptiNode},node_indices::Vector{Int64},n_nodes::Int64,n_linkeq_cons::Int64,n_linkineq_cons::Int64,
link_ineq_lower::Vector,link_ineq_upper::Vector)
graph = ModelGraph()
graph.node_idx_map = Dict{ModelNode,Int64}()
graph = OptiGraph()
graph.node_idx_map = Dict{OptiNode,Int64}()

#Add nodes to worker's graph. Each worker should have the same number of nodes, but some will be empty.
for i = 1:n_nodes
Expand Down Expand Up @@ -153,7 +153,7 @@ function _create_worker_modelgraph(modelnodes::Vector{ModelNode},node_indices::V
return graph
end

function _add_linkeq_terms(modelnodes::Vector{ModelNode})
function _add_linkeq_terms(modelnodes::Vector{OptiNode})
linkeqconstraints = OrderedDict()
for node in modelnodes
partial_links = node.partial_linkeqconstraints
Expand All @@ -174,7 +174,7 @@ function _add_linkeq_terms(modelnodes::Vector{ModelNode})
return linkeqconstraints
end

function _add_linkineq_terms(modelnodes::Vector{ModelNode})
function _add_linkineq_terms(modelnodes::Vector{OptiNode})
linkineqconstraints = OrderedDict()
for node in modelnodes
partial_links = node.partial_linkineqconstraints
Expand Down

0 comments on commit 3e4e6fd

Please sign in to comment.