Second Submission
TaperChipmunk32 committed Apr 29, 2024
1 parent 4e45b32 commit 4cac64d
Showing 9 changed files with 376 additions and 142 deletions.
Binary file modified example/homework/01/01
42 changes: 29 additions & 13 deletions example/homework/01/01.cpp
@@ -27,31 +27,40 @@ int main(int argc, char** argv) {
int M = 30;

// First split based on ranks divided by Q
int row_color = world_rank / Q;
MPI_Comm row_comm;
MPI_Comm_split(MPI_COMM_WORLD, row_color, world_rank, &row_comm);

// Second split based on ranks mod Q
int col_color = world_rank % Q;
int col_color = world_rank / Q;
MPI_Comm col_comm;
MPI_Comm_split(MPI_COMM_WORLD, col_color, world_rank, &col_comm);

// Second split based on ranks mod Q
int row_color = world_rank % Q;
MPI_Comm row_comm;
MPI_Comm_split(MPI_COMM_WORLD, row_color, world_rank, &row_comm);
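
These two splits carve MPI_COMM_WORLD into a P x Q grid. With the row-major coordinates p = world_rank / Q and q = world_rank % Q computed just below, col_comm groups the Q processes that share p (one grid row, whose local rank 0 is the q = 0 process), while row_comm groups the P processes that share q (one grid column). A self-contained sketch of the same splitting pattern, with more literal communicator names of my own choosing (along_row and along_col are not identifiers from this repository):

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
        MPI_Init(&argc, &argv);
        int world_rank, world_size;
        MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
        MPI_Comm_size(MPI_COMM_WORLD, &world_size);

        const int Q = 2;               // grid width (assumed, as in the commit)
        const int P = world_size / Q;  // grid height

        const int p = world_rank / Q;  // row coordinate
        const int q = world_rank % Q;  // column coordinate

        // color picks the group, key orders the ranks inside it
        MPI_Comm along_row, along_col;
        MPI_Comm_split(MPI_COMM_WORLD, p, q, &along_row);  // fellow row members
        MPI_Comm_split(MPI_COMM_WORLD, q, p, &along_col);  // fellow column members

        std::printf("world rank %d of a %dx%d grid is process (%d, %d)\n",
                    world_rank, P, Q, p, q);

        MPI_Comm_free(&along_row);
        MPI_Comm_free(&along_col);
        MPI_Finalize();
        return 0;
    }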

int p = world_rank / Q;
int q = world_rank % Q;
int m, n;
LinearDistribution x_dist(P, M);
LinearDistribution y_dist(Q, M);

// Process (p,q) will have m elements of x
m = x_dist.m(p);
int m = x_dist.m(p);
// Process (p,q) will have n elements of y
int n = y_dist.m(q);
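
Distributions.h is not part of this diff, so the LinearDistribution interface is an assumption here; judging by the nominal1/extra1 arithmetic later in this file, m(p) returns the load-balanced block size: M/P elements, plus one for each of the first M % P parts. A minimal stand-in consistent with that usage:

    // Hypothetical stand-in for the LinearDistribution in Distributions.h,
    // which this diff does not show. Part p owns total/parts elements,
    // plus one extra when p < total % parts.
    struct LinearDistribution {
        int parts, total;
        LinearDistribution(int parts, int total) : parts(parts), total(total) {}
        int m(int p) const { return total / parts + (p < total % parts ? 1 : 0); }
    };

With M = 30 and P = 3 every block is 10 elements; with M = 31 the blocks would be 11, 10, 10.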

// Allocate the vectors x and y
int *x_local = new int[m];
int *y_local = new int[m];
for (int i = 0; i < m; i++) {
x_local[i] = 0;
}

int *y_local = new int[n];
for (int i = 0; i < n; i++) {
y_local[i] = 0;
}

MPI_Request request;
int *sendcounts = new int[P];
int *displs = new int[P];

// Process (0,0) will have the initial data
int *x_global = new int[M];
if (world_rank == 0) {
@@ -60,23 +69,26 @@ int main(int argc, char** argv) {
}
}

MPI_Request request;
// Scatter x_global to x_local
// Calculate the displacement and count arrays for Iscatterv
int *sendcounts = new int[P];
int *displs = new int[P];
for (int i = 0; i < P; i++) {
sendcounts[i] = x_dist.m(i);
displs[i] = i * m;
}

// Scatter x_global to x_local using Iscatterv
MPI_Iscatterv(x_global, sendcounts, displs, MPI_INT, x_local, m, MPI_INT, 0, row_comm, &request);

MPI_Wait(&request, MPI_STATUS_IGNORE);

delete[] sendcounts;
delete[] displs;
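
One caveat, offered as a hedged reading rather than a claim about the author's intent: displs[i] = i * m places block i using this rank's own count m, which is only correct when every sendcount is equal (it is for M = 30 split three ways). In the general load-balanced case, block i starts where block i-1 ends:

    // General-case displacements for MPI_Iscatterv: accumulate the preceding
    // sendcounts instead of assuming they all equal this rank's m.
    displs[0] = 0;
    for (int i = 1; i < P; i++) {
        displs[i] = displs[i - 1] + sendcounts[i - 1];
    }

Note also that x_global holds meaningful data only on world rank 0, so only the q = 0 column's scatter distributes real values; the row broadcast below then overwrites x_local everywhere else.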

// Broadcast x_local to all processes in the row
MPI_Request bcast_request;
MPI_Ibcast(x_local, m, MPI_INT, 0, col_comm, &bcast_request);
MPI_Wait(&bcast_request, MPI_STATUS_IGNORE);

int nominal1 = M/P; int extra1 = M%P;
int nominal2 = M/Q; int extra2 = M%Q;

@@ -98,8 +110,12 @@ int main(int argc, char** argv) {
}
}

MPI_Request reduce_request;
// Reduce over row_comm (the fixed-q communicator): the pieces of y's chunk q
// come from every x block p, and all of row_comm's members share the count n.
MPI_Iallreduce(MPI_IN_PLACE, y_local, n, MPI_INT, MPI_SUM, row_comm, &reduce_request);
MPI_Wait(&reduce_request, MPI_STATUS_IGNORE);
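
Each rank enters this reduction with y_local mostly zero: the copy loop above could fill only the entries sourced from its own x block. Summing elementwise down the column assembles the full chunk on every member, and MPI_IN_PLACE lets y_local act as both send and receive buffer. A standalone sketch of that pattern (toy values, not repository code):

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
        MPI_Init(&argc, &argv);
        int rank, size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        int* v = new int[size]();  // value-initialized: all zeros
        v[rank] = 100 + rank;      // each rank's lone contribution

        // In-place nonblocking allreduce: v is both input and output.
        MPI_Request req;
        MPI_Iallreduce(MPI_IN_PLACE, v, size, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        // Every rank now holds {100, 101, ..., 100 + size - 1}.
        for (int i = 0; i < size; i++)
            std::printf("rank %d: v[%d] = %d\n", rank, i, v[i]);

        delete[] v;
        MPI_Finalize();
        return 0;
    }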

//print the results
std::cout << "Rank " << world_rank << " has the following values:" << std::endl;
std::cout << "World Rank " << world_rank << "(" << p << ", " << q << ")" << " has the following values:" << std::endl;
for (int i = 0; i < m; i++) {
std::cout << " x[" << i << "] = " << x_local[i] << std::endl;
}
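
Both programs assume the world size is an exact multiple of Q (for example, 6 ranks giving P = 3, Q = 2); otherwise P * Q is smaller than world_size and the leftover ranks fall outside the intended grid. A guard along these lines (my addition, not in the commit) would fail fast:

    // Bail out early unless the world divides evenly into a P x Q grid.
    if (world_size % Q != 0) {
        if (world_rank == 0) {
            std::cerr << "world size " << world_size
                      << " is not a multiple of Q = " << Q << std::endl;
        }
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
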
Binary file modified example/homework/03/03
121 changes: 71 additions & 50 deletions example/homework/03/03.cpp
@@ -5,19 +5,6 @@ elements per process is the same as in the linear load-balanced distribution wou
over Q partitions.
*/
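
The distribution this comment describes is the scatter (cyclic) one: global element I lives on part I % Q at local index I / Q, and part q holds M/Q elements plus one more when q < M % Q — the same counts as the linear distribution, which is exactly the property the comment asserts. Distributions.h is not in this diff, so the following stand-in is an assumption consistent with how y_dist is used below:

    // Hypothetical stand-in for the ScatterDistribution in Distributions.h.
    struct ScatterDistribution {
        int parts, total;
        ScatterDistribution(int parts, int total) : parts(parts), total(total) {}
        int m(int q) const { return total / parts + (q < total % parts ? 1 : 0); }
        int owner(int I) const { return I % parts; }  // part holding global index I
        int local(int I) const { return I / parts; }  // its index within that part
    };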

/*Problem 1 (30pts/24pts):
Write an MPI program that builds a 2D process topology of shape P ×Q. On each column of processes, store a
vector x of length M , distributed in a linear load-balanced fashion “vertically” (it will be replicated Q times).
Start with data only in process (0,0), and distribute it down the first column. Once it is distributed on column 0,
broadcast it horizontally in each process row. Allocate a vector y of length M that is replicated “horizontally”
in each process row and stored also in linear load-balanced distribution; there will be P replicas, one in each
process row. Using MPI_Allreduce with the appropriate communicators, do the parallel
copy y := x. There should be P replicas of the answer in y when you’re done.
Notes:
• CSC4760: Leverage your work on Problem #0 above to help do this problem.
• CSC5760: Leverage your work on HW#2, Problem #5 to help do this problem
*/

#include "mpi.h"
#include "Distributions.h"
#include <iostream>
@@ -31,71 +18,105 @@ int main(int argc, char** argv) {

int Q = 2;
int P = world_size / Q;
int M = 10;
int M = 30;

// First split based on ranks divided by Q
int row_color = world_rank / Q;
MPI_Comm row_comm;
MPI_Comm_split(MPI_COMM_WORLD, row_color, world_rank, &row_comm);

// Second split based on ranks mod Q
int col_color = world_rank % Q;
int col_color = world_rank / Q;
MPI_Comm col_comm;
MPI_Comm_split(MPI_COMM_WORLD, col_color, world_rank, &col_comm);

// Second split based on ranks mod Q
int row_color = world_rank % Q;
MPI_Comm row_comm;
MPI_Comm_split(MPI_COMM_WORLD, row_color, world_rank, &row_comm);

int p = world_rank / Q;
int q = world_rank % Q;
int m, n;
LinearDistribution dist(P, M);
LinearDistribution x_dist(P, M);
ScatterDistribution y_dist(Q, M);

// Process (p,q) will have m elements of x
int m = x_dist.m(p);
// Process (p,q) will have n elements of y
int n = y_dist.m(q);

// Allocate the vectors x and y
int *x = new int[M];
int *y = new int[M];
for (int i = 0; i < M; i++) {
x[i] = 0;
y[i] = 0;
int *x_local = new int[m];
for (int i = 0; i < m; i++) {
x_local[i] = 0;
}

// Process (p,q) will have m elements of x
m = dist.m(p);
n = dist.m(q);
int *y_local = new int[n];
for (int i = 0; i < n; i++) {
y_local[i] = 0;
}

MPI_Request request;
int *sendcounts = new int[P];
int *displs = new int[P];

// Process (0,0) will have the initial data
int *x_global = new int[M];
if (world_rank == 0) {
for (int i = 0; i < M; i++) {
x[i] = i;
x_global[i] = i;
}
}

// Distribute x down the first column
MPI_Request request;
// Scatter x_global to x_local
// Calculate the displacement and count arrays for Iscatterv
for (int i = 0; i < P; i++) {
if (p == i) {
MPI_Ibcast(x, M, MPI_INT, 0, row_comm, &request);
}
sendcounts[i] = x_dist.m(i);
displs[i] = i * m;
}

// Scatter x_global to x_local using Iscatterv
MPI_Iscatterv(x_global, sendcounts, displs, MPI_INT, x_local, m, MPI_INT, 0, row_comm, &request);

MPI_Wait(&request, MPI_STATUS_IGNORE);

// Broadcast x horizontally in each process row
for (int i = 0; i < Q; i++) {
if (q == i) {
MPI_Ibcast(x, M, MPI_INT, 0, col_comm, &request);
delete[] sendcounts;
delete[] displs;

// Broadcast x_local to all processes in the row
MPI_Request bcast_request;
MPI_Ibcast(x_local, m, MPI_INT, 0, col_comm, &bcast_request);
MPI_Wait(&bcast_request, MPI_STATUS_IGNORE);

int nominal1 = M/P; int extra1 = M%P;

for(int i = 0; i < m; i++) // m is the local size of the vector x[]
{
// x local to global: given that this element is (p,i), what is its global index I?
int I = i + ((p < extra1) ? (nominal1+1)*p :
(extra1*(nominal1+1)+(p-extra1)*nominal1));

// so to what (qhat,jhat) does this element of the original global vector go?
int qhat = I%Q;
int jhat = I/Q;

if(qhat == q) // great, this process has an element of y!
{
y_local[jhat] = x_local[i];
}
}
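
To make the remapping concrete (my worked example, using the commit's M = 30, P = 3, Q = 2): nominal1 = 10 and extra1 = 0, so block p covers global indices [10p, 10p + 10); process (1, 0)'s x_local[4] is global index I = 14, which the scatter distribution sends to qhat = 0, jhat = 7, so it lands in y_local[7] on the q = 0 processes. A serial checker for the same map:

    #include <cstdio>

    int main() {
        const int M = 30, P = 3, Q = 2;  // values assumed from the commit
        const int nominal = M / P, extra = M % P;
        for (int I = 0; I < M; I++) {
            // Owner block under the linear load-balanced distribution
            // (inverse of the I-from-(p, i) formula above).
            int p = (I < extra * (nominal + 1))
                        ? I / (nominal + 1)
                        : extra + (I - extra * (nominal + 1)) / nominal;
            int qhat = I % Q;  // owning column under the scatter distribution
            int jhat = I / Q;  // local index within that column
            std::printf("I=%2d: block p=%d -> (qhat=%d, jhat=%2d)\n", I, p, qhat, jhat);
        }
        return 0;
    }
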
MPI_Wait(&request, MPI_STATUS_IGNORE);

// Copy x to y
MPI_Iallreduce(x, y, M, MPI_INT, MPI_SUM, row_comm, &request);
MPI_Wait(&request, MPI_STATUS_IGNORE);
MPI_Request reduce_request;
// Reduce over row_comm (fixed q, varying p): the qhat == q test above means each
// process contributed only the chunk-q entries it could source from its own x
// block, so summing over all p assembles the whole chunk. (All of row_comm's
// members also share the same n; across col_comm, n differs when M % Q != 0.)
MPI_Iallreduce(MPI_IN_PLACE, y_local, n, MPI_INT, MPI_SUM, row_comm, &reduce_request);
MPI_Wait(&reduce_request, MPI_STATUS_IGNORE);

// Print the result
std::cout << "Rank " << world_rank << std::endl;
for (int i = 0; i < M; i++) {
std::cout << "y[" << i << "] = " << y[i] << std::endl;
//print the results
std::cout << "World Rank " << world_rank << "(" << p << ", " << q << ")" << " has the following values:" << std::endl;
for (int i = 0; i < m; i++) {
std::cout << " x[" << i << "] = " << x_local[i] << std::endl;
}
for (int i = 0; i < n; i++) {  // y_local holds n elements, not m
std::cout << " y[" << i << "] = " << y_local[i] << std::endl;
}

delete[] x;
delete[] y;
delete[] x_local;
delete[] y_local;
delete[] x_global;


MPI_Finalize();
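
Neither file releases the communicators created by MPI_Comm_split; MPI_Finalize will clean them up, but freeing them explicitly is the tidier idiom. A sketch (my addition, not in the commit):

    // Release the split communicators once they are no longer needed.
    MPI_Comm_free(&row_comm);
    MPI_Comm_free(&col_comm);
    MPI_Finalize();
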
Binary file added example/homework/04/04_1
