updates to docs and added a new option to not broadcast results
guilhermebodin committed Nov 11, 2024
1 parent e7c4945 commit 59daa8d
Showing 6 changed files with 107 additions and 14 deletions.
4 changes: 2 additions & 2 deletions README.md
@@ -6,8 +6,8 @@
[codecov-img]: https://codecov.io/gh/psrenergy/JobQueueMPI.jl/coverage.svg?branch=master
[codecov-url]: https://codecov.io/gh/psrenergy/JobQueueMPI.jl?branch=master

-| **Build Status** | **Coverage** |
-|:-----------------:|:-----------------:|
+| **Build Status** | **Coverage** | **Documentation** |
+|:-----------------:|:-----------------:|:-----------------:|
| [![Build Status][build-img]][build-url] | [![Codecov branch][codecov-img]][codecov-url] |[![](https://img.shields.io/badge/docs-latest-blue.svg)](https://psrenergy.github.io/JobQueueMPI.jl/dev/)


4 changes: 2 additions & 2 deletions docs/make.jl
@@ -12,10 +12,10 @@ makedocs(;
authors = "psrenergy",
pages = [
"Home" => "index.md",
-]
+],
)

deploydocs(;
repo = "github.com/psrenergy/JobQueueMPI.jl.git",
push_preview = true,
)
)
94 changes: 94 additions & 0 deletions docs/src/index.md
@@ -19,6 +19,100 @@ JobQueueMPI.jl has the following components:
- `Controller`: The controller is responsible for managing the jobs and the workers. It keeps track of the jobs that have been sent and received and sends the jobs to the available workers.
- `Worker`: The worker is responsible for executing the jobs. It receives the jobs from the controller, executes them, and sends the results back to the controller.

Users can call functions to compute jobs in parallel in two ways:
- Building a function and using the `pmap` implementation, which puts each input in the job queue and sends the jobs to the available workers.
```julia
using JobQueueMPI

function sum_100(value)
    return value + 100
end

sum_100_answer = JobQueueMPI.pmap(sum_100, collect(1:10))
```
- Building the jobs and sending them to the workers explicitly, as in the example below. There are examples of this structure in the test folder. This approach is much more flexible than the first, but it requires more code and some knowledge of how MPI works.

```julia
import JobQueueMPI as JQM  # the JQM prefix used below refers to this alias

mutable struct Message
    value::Int
    vector_idx::Int
end

all_jobs_done(controller) = JQM.is_job_queue_empty(controller) && !JQM.any_pending_jobs(controller)

function sum_100(message::Message)
    message.value += 100
    return message
end

function update_data(new_data, message::Message)
    new_data[message.vector_idx] = message.value
    return nothing
end

function workers_loop()
    if JQM.is_worker_process()
        worker = JQM.Worker()
        while true
            # Wait for the next job from the controller; a termination message ends the loop.
            job = JQM.receive_job(worker)
            message = JQM.get_message(job)
            if message == JQM.TerminationMessage()
                break
            end
            return_message = sum_100(message)
            JQM.send_job_answer_to_controller(worker, return_message)
        end
        exit(0)
    end
end

function job_queue(data)
    JQM.mpi_init()
    JQM.mpi_barrier()

    T = eltype(data)
    N = length(data)

    if JQM.is_controller_process()
        # Only the controller process builds the job queue and gathers the results.
        new_data = Array{T}(undef, N)

        controller = JQM.Controller(JQM.num_workers())

        for i in eachindex(data)
            message = Message(data[i], i)
            JQM.add_job_to_queue!(controller, message)
        end

        while !all_jobs_done(controller)
            # Send queued jobs to idle workers and collect any answers that have arrived.
            if !JQM.is_job_queue_empty(controller)
                JQM.send_jobs_to_any_available_workers(controller)
            end
            if JQM.any_pending_jobs(controller)
                job_answer = JQM.check_for_job_answers(controller)
                if !isnothing(job_answer)
                    message = JQM.get_message(job_answer)
                    update_data(new_data, message)
                end
            end
        end

        JQM.send_termination_message()

        return new_data
    end
    workers_loop()
    JQM.mpi_barrier()
    JQM.mpi_finalize()
    return nothing
end

data = collect(1:10)
new_data = job_queue(data)
```
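
Both examples above are meant to be launched under MPI so that worker processes exist. Below is a minimal launch sketch, assuming the second example is saved as `job_queue_example.jl` (a hypothetical file name) and that MPI.jl's `mpiexec` wrapper is available in the environment:

```julia
using MPI

# Run the hypothetical script on 4 ranks (1 controller + 3 workers).
# Shell equivalent: mpiexec -n 4 julia --project job_queue_example.jl
MPI.mpiexec() do exe
    run(`$exe -n 4 $(Base.julia_cmd()) --project job_queue_example.jl`)
end
```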

## API

```@docs
8 changes: 4 additions & 4 deletions src/pmap.jl
@@ -39,10 +39,10 @@ The controller process will return the answer in the same order as the jobs were
return nothing.
"""
function pmap(
-f::Function,
-jobs::Vector,
-data_defined_in_process = nothing;
-return_result_in_all_processes::Bool = false
+f::Function,
+jobs::Vector,
+data_defined_in_process = nothing;
+return_result_in_all_processes::Bool = true,
)
result = Vector{Any}(undef, length(jobs))
if is_running_in_parallel()
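For illustration, a hedged sketch of a call that uses the keyword shown in this hunk; the behaviour described in the comments is inferred from the commit message ("a new option to not broadcast results") rather than from the source:

```julia
import JobQueueMPI as JQM

JQM.mpi_init()

# Assumption: with the keyword set to false the gathered results are not
# broadcast, so only the controller process ends up holding them.
answers = JQM.pmap(x -> x + 100, collect(1:10); return_result_in_all_processes = false)

if JQM.is_controller_process()
    @show answers    # expected: [101, 102, ..., 110]
end

JQM.mpi_finalize()
```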
2 changes: 1 addition & 1 deletion test/test_pmap_mpi.jl
@@ -24,7 +24,7 @@ divisors_answer = JQM.pmap(get_divisors, collect(1:10))
@testset "pmap MPI" begin
@test sum_100_answer == [101, 102, 103, 104, 105, 106, 107, 108, 109, 110]
@test divisors_answer ==
[[1], [1, 2], [1, 3], [1, 2, 4], [1, 5], [1, 2, 3, 6], [1, 7], [1, 2, 4, 8], [1, 3, 9], [1, 2, 5, 10]]
[[1], [1, 2], [1, 3], [1, 2, 4], [1, 5], [1, 2, 3, 6], [1, 7], [1, 2, 4, 8], [1, 3, 9], [1, 2, 5, 10]]
end

JQM.mpi_finalize()
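
`get_divisors` is defined earlier in this test file and is not shown in the hunk above. A hypothetical reconstruction, consistent with the expected answers in the `@testset`:

```julia
# Hypothetical sketch for context only; the real definition lives above the
# shown hunk in test/test_pmap_mpi.jl.
function get_divisors(n::Int)
    return [d for d in 1:n if n % d == 0]
end

get_divisors(10)  # [1, 2, 5, 10]
```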
9 changes: 4 additions & 5 deletions test/test_pmap_mpi_optim.jl
@@ -8,19 +8,18 @@ JQM.mpi_init()
N = 5
data = collect(1:N)
function g(x, i, data)
-return i * (x[i] - 2 * i) ^ 2 + data[i]
+return i * (x[i] - 2 * i)^2 + data[i]
end
x0 = zeros(N)
list_i = collect(1:N)
fake_input = Int[] # ignored


let
is_done = false
if JQM.is_controller_process()
ret = optimize(x0, NelderMead()) do x
MPI.bcast(is_done, MPI.COMM_WORLD)
-g_x = JQM.pmap((v)->g(v[1], v[2], data), [(x, i) for i in list_i])
+g_x = JQM.pmap((v) -> g(v[1], v[2], data), [(x, i) for i in list_i])
return sum(g_x)
end
# tell workers to stop calling pmap
@@ -42,11 +41,11 @@
if is_done
break
end
-JQM.pmap((v)->g(v[1], v[2], data), fake_input)
+JQM.pmap((v) -> g(v[1], v[2], data), fake_input)
end
end
end

JQM.mpi_barrier()

JQM.mpi_finalize()
JQM.mpi_finalize()
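
The pattern above couples Optim.jl on the controller with collective `pmap` calls: before every objective evaluation the controller broadcasts an `is_done` flag, and the workers mirror the `pmap` call with an empty input so they can serve jobs. A minimal hedged sketch of that handshake in isolation (the work function, input vector, and fixed iteration count are hypothetical stand-ins, and the elided controller lines are assumed to broadcast `is_done = true` once the optimizer finishes):

```julia
import JobQueueMPI as JQM
using MPI

JQM.mpi_init()

f(x) = x^2               # hypothetical work function
inputs = collect(1:4)    # hypothetical inputs

if JQM.is_controller_process()
    for _ in 1:3                                # stand-in for the optimizer's objective calls
        MPI.bcast(false, MPI.COMM_WORLD)        # "not done yet"
        println(sum(JQM.pmap(f, inputs)))
    end
    MPI.bcast(true, MPI.COMM_WORLD)             # release the workers
else
    while !MPI.bcast(false, MPI.COMM_WORLD)     # workers receive the controller's flag
        JQM.pmap(f, Int[])                      # join the collective pmap call
    end
end

JQM.mpi_barrier()
JQM.mpi_finalize()
```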
