-
Notifications
You must be signed in to change notification settings - Fork 2
/
dsmapredopen.m
executable file
·228 lines (225 loc) · 9.11 KB
/
dsmapredopen.m
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
% Author: Carl Doersch (cdoersch at cs dot cmu dot edu)
%
% Spawn a set of parallel workers. njobs is the number of separate
% workers. target is either the hostname of a machine where
% qsub may be run (this machine must have the output
% directory of dswork mounted), or it may be the string
% 'local', in which case the workers will be run on the current
% machine.
%
% conf may include the following:
%
% - qsubopts: any additional flags to pass to qsub.
%
% - singleCompThread: a flag to tell matlab distributed workers to use
% only a single thread for computation. Note
% that without this flag, each matlab can start a large number of
% threads, so running many of them at once can
% make you run out of threads in your userspace.
%
% - runcompiled: a highly experimental flag allowing you to run with compiled
% code and an MCR. This has worked in the past, but it has
% not been maintained. Contact me if you would like to get
% it running again.
%
% You may need to rewrite this function to work on your cluster.
% The assumptions are (1) that ds.sys.outdir will point to a directory
% accessible to both the main thread and the distributed worker
% threads, (2) that filesystem should support sequential, close-to-open,
% or an equivalently strong consistency model, (3) that your filesystem supports
% fifo's (that only have to work within a single machine; most unix
% filesystems do) and (4) that it supports bash or an equivalent shell.
% This function simply generates shell scripts and then qsub's them.
%
function dsmapredopen(njobs,target,conf)
% Spawn njobs parallel workers on `target` ('local' or a qsub submit host).
% Writes one bash launcher script per worker under ds/sys/distproc/, then
% either backgrounds them locally or batches them into qsub submissions.
% Each launcher keeps a matlab (or compiled MCR) worker alive in a
% restart-on-nonzero-exit loop, with the worker's stdin attached to a fifo
% so that dscmd can inject commands. See the header comment above for the
% full contract and cluster assumptions.
if(nargin<3)
  conf=struct();
end
%general setup; should not need to be modified.
if(strcmp(target,'local'))
  submitlocal=1;
else
  submitlocal=0;
end
if(dsfield(conf,'qsubopts'))
  qsubopts=conf.qsubopts;
else
  qsubopts='';
end
if(dsfield(conf,'runcompiled'))
  % semicolon added: don't echo the flag to the console
  iscompiled=conf.runcompiled;
else
  iscompiled=isdeployed;
end
if(dsfield(conf,'chunksize'))
  chunksize=conf.chunksize;
else
  chunksize=1;
end
if(~iscompiled)
  % directory containing this .m file; workers addpath() it before running.
  dsworkpath=mfilename('fullpath');
  dotpos=find(dsworkpath=='/');
  dsworkpath=dsworkpath(1:(dotpos(end)-1));
end
global ds;
sysdir=[ds.sys.outdir 'ds/sys/'];
mymkdir(sysdir);
distprocdir=[ds.sys.outdir 'ds/sys/distproc/'];
mymkdir(distprocdir);
% -f: don't print an error when the directory is already empty
unix(['rm -f ' distprocdir '*']);
% per-worker bookkeeping kept in the global ds structure; '?' hostnames are
% placeholders until each worker reports in.
ds.sys.distproc.nmapreducers=njobs;
ds.sys.distproc.hostname=num2cell(char(ones(njobs,1,'uint8').*uint8('?')))';
ds.sys.distproc.commlinkmaster=cell(njobs,1);
ds.sys.distproc.commlinkslave=cell(njobs,1);
ds.sys.distproc.commlinkinterrupt=cell(njobs,1);
ds.sys.distproc.progresslink=cell(njobs,1);
ds.sys.distproc.allslaves=[];
ds.sys.distproc.availslaves=[];
ds.sys.distproc.possibleslaves=1:njobs;
ds.sys.distproc.commfailures=zeros(ds.sys.distproc.nmapreducers,1);
ds.sys.distproc.notresponding=[];
ds.sys.distproc.nextserial=0;
ds.sys.distproc.slavefinishedserial=zeros(ds.sys.distproc.nmapreducers,1)-1;
dsgeninterruptor([distprocdir 'interruptor.sh']);
for(i=1:njobs)
  ds.sys.distproc.commlinkmaster{i}=[distprocdir 'master' num2str(i) '.mat'];
  ds.sys.distproc.commlinkslave{i}=[distprocdir 'slave' num2str(i) '.mat'];
  ds.sys.distproc.progresslink{i}=[distprocdir 'progress' num2str(i)];
  ds.sys.distproc.commlinkinterrupt{i}=[distprocdir 'interrupt' num2str(i) '.mat'];
end
dssave();
currchunk={};
nchunks=1;
libs=getenv('LD_LIBRARY_PATH');
for(i=1:njobs)
  %generate the script.
  disp(['submitting job ' num2str(i)]);
  tmpOutFName = [distprocdir 'qsubfile' num2str(i) '.sh'];
  fid = fopen(tmpOutFName, 'w');
  [~,nm]=unix('hostname');
  if(numel(strfind(nm,'teragrid'))>0&&dsfield(ds,'dispoutpath'))
    logfile=[ds.dispoutpath '/output' num2str(i) '.log'];
  else
    logfile=[distprocdir 'output' num2str(i) '.log'];
  end
  % begin writing the script that will be run via qsub.
  mlpipe=[distprocdir '/mlpipe' num2str(i)];
  fprintf(fid, '%s\n',['#!/bin/bash'] );
  % some qsub systems refuse to forward the LD_LIBRARY_PATH, so we do it
  % ourselves (the awk one-liner deduplicates path entries).
  if(~isempty(libs))
    fprintf(fid, '%s\n',['TMP_LIBRARY_PATH=$LD_LIBRARY_PATH:' libs] );
    fprintf(fid, '%s\n',['export LD_LIBRARY_PATH=$(awk ''BEGIN{ORS=":";RS="[:\n]"}!a[$0]++'' <<<"${TMP_LIBRARY_PATH%:}");'] );
  end
  fprintf(fid, '%s\n',['cd "' pwd '";'] );
  % runtail tracks whether the fifo is usable: if it already exists as a
  % fifo we use it; otherwise we try to create it, falling back to plain
  % stdin if mkfifo fails (e.g. on a filesystem without fifo support).
  fprintf(fid, '%s\n',['runtail=1;']);
  fprintf(fid, '%s\n',['if [[ ! -p ' mlpipe ' ]]; then']);
  fprintf(fid, '%s\n',[' if mkfifo "' mlpipe '"; then']);
  fprintf(fid, '%s\n',['  runtail=1']);
  fprintf(fid, '%s\n',[' else']);
  fprintf(fid, '%s\n',['  runtail=0']);
  fprintf(fid, '%s\n',[' fi']);
  fprintf(fid, '%s\n',['fi']);
  % on each worker, matlab is run with the fifo (mlpipe) as the STDIN that it reads commands from; this is the magic
  % that lets dscmd work. Output is sent to a logfile.
  if(iscompiled) % iscompiled is for running compiled in a particular mpi environment.
    runnable = ['dplace -c ' num2str(i) ' ./dsmapreducerwrap ' num2str(i) ' "' ds.sys.outdir '" 1'];
    if(submitlocal)
      runnable = ['nice -n 15 ' runnable];
    end
  else
    if(submitlocal)
      matlabbin='nice -n 15 matlab';
    else
      %this should point to the matlab binary accessible on worker nodes.
      [~,matlabbin]=unix('which matlab');
      matlabbin=matlabbin(1:end-1); % strip trailing newline from `which`
    end
    sct='-nojvm';
    if(dsbool(conf,'singleCompThread'))
      sct=['-singleCompThread ' sct];
    end
    %the actual command run in matlab once everything is set up.
    matlabcmd=['addpath(''' dsworkpath ''');dsmapreducerwrap(' num2str(i) ',''' ds.sys.outdir ''',1);'];
    runnable = [matlabbin ' -nodesktop -nosplash ' sct ' -r "' matlabcmd '"'];
  end
  % redirect stdout to the logfile first, then dup stderr onto it; the
  % previous ordering (2>&1 before >>) sent stderr to the launcher's
  % stdout instead of the logfile.
  fullcmd=[' ' runnable ' >> "' logfile '" 2>&1 &' ];
  fullcmd2=[' ' runnable ' < ' mlpipe ' >> "' logfile '" 2>&1 &' ];
  % note that this sh file is responsible for dealing with dsmapredrestart
  % (as well as dealing with matlab crashing badly/segfaulting). This while
  % loop simply runs matlab and, if its return code is nonzero, restarts matlab.
  fprintf(fid, '%s\n',['rm -f ' logfile ]);
  fprintf(fid, '%s\n',['false;']); % prime $? so the while loop runs at least once
  fprintf(fid, '%s\n',['while [ $? -gt 0 ]; do']);
  % compare numerically: [ $runtail ] was always true since runtail is "1" or "0"
  fprintf(fid, '%s\n',['if [ "$runtail" -eq 1 ]; then']);
  fprintf(fid, '%s\n',[' ' fullcmd2]);
  fprintf(fid, '%s\n',[' mypid=$!']);
  % write one line into the fifo to unblock the worker's first stdin read
  fprintf(fid, '%s\n',[' echo "a" > ' mlpipe]);
  fprintf(fid, '%s\n',['else']);
  fprintf(fid, '%s\n',[' ' fullcmd]);
  fprintf(fid, '%s\n',[' mypid=$!']);
  fprintf(fid, '%s\n',['fi']);
  % record the worker pid so interruptor.sh can signal it later
  fprintf(fid, '%s\n',['echo $mypid > ' distprocdir 'matpid' num2str(i) ]);
  fprintf(fid, '%s\n',['wait $mypid' ]);
  fprintf(fid, '%s\n',['done' ]);
  fclose(fid);
  unix(['chmod 755 ' tmpOutFName]);
  % actually submit the jobs. tmpOutFName is the actual script that needs to be
  % run on each node. In my case, warp.hpc1.cs.cmu.edu was the root node of the
  % cluster, which handled queueing for the cluster via torque.
  if(submitlocal)
    logfileerr=[distprocdir 'stderr' num2str(i) '.log'];
    logfileout=[distprocdir 'stdout' num2str(i) '.log'];
    % stagger local startups by ~half a second per job to avoid a thundering herd
    unix(['sleep ' num2str(floor(i/2)) ' && ' tmpOutFName ' >' logfileout ' 2>' logfileerr ' &']);
  else
    % batch scripts into chunks so one qsub job can host several workers
    currchunk{end+1}=tmpOutFName;
    if(numel(currchunk)>=chunksize)
      submitchunk(currchunk,qsubopts,nchunks,target);
      nchunks=nchunks+1;
      currchunk={};
    end
  end
end
% submit any leftover partial chunk
if(~isempty(currchunk))
  submitchunk(currchunk,qsubopts,nchunks,target);
end
ds.sys.distproc.isopen=1;
end
function submitchunk(chunk,qsubopts,chunkid,target)
% Submit one chunk of worker launcher scripts as a single qsub job.
%
% chunk:    cell array of paths to executable worker scripts.
% qsubopts: extra flags forwarded verbatim to qsub.
% chunkid:  integer used to name the wrapper script and qsub log files.
% target:   hostname (reachable via ssh) where qsub may be run.
%
% If the chunk holds more than one script, a wrapper script is written that
% backgrounds them all and waits; otherwise the single script is submitted
% directly. The qsub command is piped to `bash -s` over ssh on `target`.
global ds;
distprocdir=[ds.sys.outdir 'ds/sys/distproc/'];
logfileerr=[distprocdir 'stderr' num2str(chunkid) '.log'];
logfileout=[distprocdir 'stdout' num2str(chunkid) '.log'];
logstring = ['-e "' logfileerr '" -o "' logfileout '"'];
if(numel(chunk)==1)
  submitScr=chunk{1};
else
  % wrap the chunk's scripts in one launcher that runs them in parallel
  submitScr=[distprocdir 'qsubchunk' num2str(chunkid) '.sh'];
  fid = fopen(submitScr, 'w');
  for(i=1:numel(chunk))
    fprintf(fid, '%s\n',[chunk{i} ' &'] );
  end
  fprintf(fid, '%s\n','wait;' );
  fclose(fid); % semicolon added: don't echo fclose's return value
end
qsub_cmd=['source /etc/profile; qsub -V -N dsjob' num2str(chunkid) ' ' qsubopts ' ' logstring ' ' submitScr];
ssh_cmd = sprintf(['echo ''%s'' | ssh ' target ' ''bash -s'''], qsub_cmd);
% echo the commands for debugging (previously done via missing semicolons)
disp(qsub_cmd);
disp(ssh_cmd);
unix(ssh_cmd);
end