parallel.mpi.c
77 lines (66 loc) · 1.51 KB
#include <mpi.h>
#include "parallel.mpi.h"
PURE size_t
rank()
{
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  return (size_t)rank;
}
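/* Number of processes in MPI_COMM_WORLD. */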
PURE size_t
size()
{
  int size;
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  return (size_t)size;
}
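/* Abort all ranks in MPI_COMM_WORLD with a nonzero error code. */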
void par_abort() { MPI_Abort(MPI_COMM_WORLD, -1); }
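/* Broadcast n floats from rank 0 to all other ranks. */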
void
broadcastf(float* f, size_t n)
{
  MPI_Bcast(f, n, MPI_FLOAT, 0, MPI_COMM_WORLD);
}
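/* Broadcast n doubles from rank 0 to all other ranks. */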
void
broadcastlf(double* lf, size_t n)
{
  MPI_Bcast(lf, n, MPI_DOUBLE, 0, MPI_COMM_WORLD);
}
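/* Broadcast n size_t values from rank 0 to all other ranks. */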
void
broadcastzu(size_t* zu, size_t n)
{
  /* MPI has no built-in size_t datatype, so stage the values through a
   * temporary unsigned buffer (values that do not fit in unsigned are
   * truncated). */
  unsigned data[n];
  if(rank() == 0) {
    unsigned* v;
    size_t* z;
    for(v=data, z=zu; v < data+n; ++v, ++z) {
      *v = *z;
    }
  }
  MPI_Bcast(data, n, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
  unsigned* v; size_t* z;
  for(v=data, z=zu; v < data+n; ++v, ++z) { *z = *v; }
}
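/* Broadcast a byte/character buffer of len bytes from rank 0 to all other ranks. */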
void
broadcasts(char* str, size_t len)
{
  MPI_Bcast(str, len, MPI_BYTE, 0, MPI_COMM_WORLD);
}
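/* Broadcast n bools (as raw bytes) from rank 0 to all other ranks. */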
void
broadcastb(bool* b, size_t n)
{
  MPI_Bcast(b, n, MPI_BYTE, 0, MPI_COMM_WORLD);
}
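/* Block until every rank in MPI_COMM_WORLD reaches this point. */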
void barrier() { MPI_Barrier(MPI_COMM_WORLD); }
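/* In-place MPI_Allgatherv: each rank's slice of f (rcvcount[rank] floats at
 * displacements[rank]) is shared so every rank ends up with the full array. */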
void
allgatherf(float* f, int* rcvcount, int* displacements)
{
  MPI_Allgatherv(MPI_IN_PLACE, 0 /* ignored */, MPI_DATATYPE_NULL /* ignored */,
                 f, rcvcount, displacements, MPI_FLOAT, MPI_COMM_WORLD);
}
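/* As allgatherf, but for doubles. */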
void
allgatherlf(double* lf, int* rcvcount, int* displacements)
{
  MPI_Allgatherv(MPI_IN_PLACE, 0 /* ignored */, MPI_DATATYPE_NULL /* ignored */,
                 lf, rcvcount, displacements, MPI_DOUBLE, MPI_COMM_WORLD);
}
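
For reference, a minimal usage sketch (not part of this file). It assumes the wrappers above are declared in parallel.mpi.h and that the caller is responsible for MPI_Init/MPI_Finalize, which these helpers do not perform.

#include <stdio.h>
#include <mpi.h>
#include "parallel.mpi.h"

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);

  /* Rank 0 decides the parameters; everyone else receives them. */
  double params[2] = {0.0, 0.0};
  if(rank() == 0) {
    params[0] = 1.5;
    params[1] = -2.0;
  }
  broadcastlf(params, 2);

  barrier();
  printf("rank %zu of %zu: params = {%g, %g}\n",
         rank(), size(), params[0], params[1]);

  MPI_Finalize();
  return 0;
}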