#!/usr/bin/perl
#
# 2011/11/27 gabriel

use strict;
use warnings;    # added: catch undef interpolation, typos in variables, etc.

use Getopt::Long();
use Pod::Usage;
use Coro;
use Coro::Semaphore;
use Coro::Signal;
use Coro::Channel;
use Coro::Handle;
use IO::File;
use POSIX qw( WNOHANG WEXITSTATUS );
use Cwd qw( getcwd );
# Command-line options with their defaults.
my $filecmd  = '';                          # file listing the commands to run (mandatory)
my $logtrace = '';                          # trace file for re-run / checkpoint support
my $verbose;
my $job_np   = 1;                           # processors per small job
my $nodefile = $ENV{OAR_NODE_FILE} || '';   # one ressource (core) per line
my $masterio;                               # base name for subjob stdout/stderr
my $switchio;                               # one output file pair per subjob
my $help;
my $oarsh    = 'oarsh -q -T';               # remote shell command

Getopt::Long::GetOptions(
    'filecmd=s'  => \$filecmd,
    'logtrace=s' => \$logtrace,
    'verbose'    => \$verbose,
    'help'       => \$help,
    'oarsh=s'    => \$oarsh,
    'jobnp=i'    => \$job_np,
    'nodefile=s' => \$nodefile,
    'masterio=s' => \$masterio,
    'switchio'   => \$switchio,
) or pod2usage(-verbose => 0);
pod2usage(-verbose => 2) if $help;
pod2usage(-verbose => 2) unless -e $filecmd;
# On re-run, recover the trace of jobs already processed: job N is in
# state 'start' if it was launched, 'end' if it finished cleanly.
my %state;
my $log_h = IO::File->new();
if (-e $logtrace) {
    $log_h->open("< $logtrace")
        or die "error: can't read log file: $!";
    while (my $line = <$log_h>) {
        if ($line =~ m/^(start|end)\s+job\s+(\d+)\s/) {
            $state{$2} = $1;
        }
    }
    $log_h->close();
}
if ($logtrace) {
    # Reopen in append mode; unblock() wraps the handle as a
    # Coro::Handle so prints from coroutines never block the process.
    $log_h->open(">> $logtrace")
        or die "error: can't append log file $logtrace: $!";
    $log_h->autoflush;
    $log_h = unblock $log_h;
}
# Jobs to run: one shell command per non-empty, non-comment line of the
# command file. (Fixed: lexical filehandle instead of a bareword global.)
my @job = ();
open(my $job_list_fh, '<', $filecmd)
    or die "error: can't open job file $filecmd: $!";
while (my $line = <$job_list_fh>) {
    chomp $line;
    next if $line =~ m/^#/;       # comment line
    next if $line =~ m/^\s*$/;    # blank line
    push @job, $line;
}
close $job_list_fh;
# Ressources available: one entry (one core slot) per line of the node
# file. (Fixed: lexical filehandle instead of a bareword global.)
my @ressources = ();
open(my $node_fh, '<', $nodefile)
    or die "can't open $nodefile: $!";
while (my $line = <$node_fh>) {
    chomp $line;
    next if $line =~ m/^#/;       # comment line
    next if $line =~ m/^\s*$/;    # blank line
    push @ressources, $line;
}
close $node_fh;

# A parallel job needs $job_np ressources at once; refuse to start if
# even one job cannot fit. (Fixed typo in message: "enought" -> "enough".)
my $ressource_size = scalar(@ressources);
die "error: not enough ressources jobnp $job_np > ressources $ressource_size"
    if $job_np > $ressource_size;
# Remember where the master job started: every subjob cd's there first.
my $current_dir = getcwd();

# Base names for per-job output files (--switchio): the master OAR
# output files stripped of their extension, unless --masterio overrides.
my $stderr = $ENV{OAR_STDERR} || '';
$stderr =~ s/\.stderr$//;
my $stdout = $ENV{OAR_STDOUT} || '';
$stdout =~ s/\.stdout$//;
if ($masterio) {
    $stderr = $masterio;
    $stdout = $masterio;
}

# $finished is signaled when everything is done (or after a checkpoint
# drain); $job_todo counts the jobs that still have to complete.
my $finished = new Coro::Signal;
my $job_todo = new Coro::Semaphore 0;
$job_todo->up for @job;
# Cut the ressource list into slices of $job_np entries; each slice is
# one slot a (possibly parallel) subjob can run on. Trailing entries
# that do not fill a whole slice are left unused.
my $ressources = new Coro::Channel;
for (my $offset = 0; $offset + $job_np <= $ressource_size; $offset += $job_np) {
    my $slice = join(',', @ressources[ $offset .. ($offset + $job_np - 1) ]);
    $ressources->put($slice);
}
my $job_num   = 0;     # number of the next job to launch (1-based)
my %scheduled = ();    # pid of running subjob -> { fh, node_connect, ressource, num }

# OAR checkpoint support: on SIGUSR2 (the OAR default checkpoint
# signal), stop launching new jobs and let the running ones finish.
my $oar_checkpoint = new Coro::Semaphore 0;
$SIG{USR2} = sub {
    if ($verbose) {
        print "warning: receive checkpoint at "
            . time
            . ", no new job, just finishing running job\n";
    }
    $oar_checkpoint->up();
};
# Launcher coroutine: walk the job list, take one ressource slice per
# job and feed the job's command to a remote shell started with $oarsh.
async {
  JOB:
    for my $job (@job) {
        $job_num++;

        # Skip jobs finished in a previous run; relaunch jobs that were
        # started but never logged as ended (see --logtrace).
        if (exists $state{$job_num}) {
            if ($state{$job_num} eq 'start') {
                print "warning: job $job_num was not clearly finished, relaunching...\n"
                    if $verbose;
            }
            elsif ($state{$job_num} eq 'end') {
                delete $state{$job_num};    # free memory
                $job_todo->down;
                print "warning: job $job_num already run\n" if $verbose;
                cede;
                next JOB;
            }
        }

        # Take a ressource slice; blocks until the reaper returns one.
        my $job_ressource = $ressources->get;

        # No new job once an OAR checkpoint has been received.
        last JOB if $oar_checkpoint->count() > 0;

        # Open a shell on the first node of the slice. A pipe open
        # returns the pid of the child process (the oarsh command).
        # NOTE(review): $oarsh and $node_connect are interpolated into a
        # shell command line; they come from trusted OAR files/options.
        my ($node_connect) = split ',', $job_ressource;
        my $fh = IO::File->new();
        my $job_pid = $fh->open("| $oarsh $node_connect >/dev/null 2>&1")
            or die "error: can't start subjob: $!";

        $fh->autoflush;
        $fh = unblock $fh;    # Coro-friendly handle: print never blocks the scheduler

        $scheduled{$job_pid} = {
            fh           => $fh,
            node_connect => $node_connect,
            ressource    => $job_ressource,
            num          => $job_num
        };

        my $msg = sprintf "start job %5i / %5i at %s on node %s\n",
            $job_num, $job_pid, time, $job_ressource;
        $log_h->print($msg) if $logtrace;
        print($msg) if $verbose;

        # Redirections for the subjob command line. Fixed: initialize to
        # empty strings so the interpolation below never sees undef when
        # --switchio is off (avoids "uninitialized value" warnings).
        my ($job_stdout, $job_stderr) = ('', '');
        $job_stdout = "> $stdout-$job_num.stdout"  if $stdout ne '' and $switchio;
        $job_stderr = "2> $stderr-$job_num.stderr" if $stderr ne '' and $switchio;

        my $job_nodefile = "/tmp/oar-parexec-$ENV{LOGNAME}-$job_num";

        # Set the job environment on the remote side, run it and clean up.
        if ($job_np > 1) {
            # '\n' is single-quoted on purpose: the literal backslash-n
            # is expanded by the remote printf, one node name per line.
            $fh->print("printf \""
                . join('\n', split(',', $job_ressource))
                . "\" > $job_nodefile\n");
            $fh->print("OAR_NODE_FILE=$job_nodefile\n");
            $fh->print("OAR_NP=$job_np\n");
            $fh->print("export OAR_NODE_FILE\n");
            $fh->print("export OAR_NP\n");
            $fh->print("unset OAR_MSG_NODEFILE\n");
        }
        $fh->print("cd $current_dir\n");
        $fh->print("$job $job_stdout $job_stderr\n");
        $fh->print("rm -f $job_nodefile\n") if $job_np > 1;
        $fh->print("exit\n");
        cede;
    }
};
# asynchrone end job block
# Reaper coroutine: poll the running subjobs; for each one that has
# exited, log it, give its ressource slice back to the channel and
# decrement the todo counter. Signals $finished when everything is done.
async {
    while () {
        for my $job_pid (keys %scheduled) {
            # non blocking PID test: waitpid with WNOHANG returns the
            # pid once the child has exited, 0 while it is still running
            if (waitpid($job_pid, WNOHANG)) {
                my $msg = sprintf "end job %5i / %5i at %s on node %s\n",
                    $scheduled{$job_pid}->{num},
                    $job_pid, time, $scheduled{$job_pid}->{ressource};
                $log_h->print($msg) if $logtrace;
                print($msg) if $verbose;
                close $scheduled{$job_pid}->{fh};
                # leave ressources for another job
                $ressources->put($scheduled{$job_pid}->{ressource});
                $job_todo->down;
                delete $scheduled{$job_pid};
            }
            # yield inside the loop so the launcher can run between polls
            cede;
        }

        # checkpointing ! just finishing running job and quit
        $finished->send if $oar_checkpoint->count() > 0 and scalar(keys(%scheduled)) == 0;

        # normal termination: every job accounted for
        $finished->send if $job_todo->count() == 0;
        cede;
    }
}
# Hand control to the two coroutines above.
cede;

# Block the main thread until every job has been done (or a checkpoint
# has drained the running jobs).
$finished->wait;

# Close the log trace file, if one was opened.
if ($logtrace) {
    $log_h->close();
}
231 | __END__ |
---|
232 | |
---|
233 | =head1 NAME |
---|
234 | |
---|
oar-parexec - execute lots of small jobs in parallel
236 | |
---|
237 | =head1 SYNOPSIS |
---|
238 | |
---|
239 | oar-parexec --filecmd filecommand [--logtrace tracefile] [--verbose] [--jobnp integer] [--nodefile filenode] [--masterio basefileio] [--switchio] [--oarsh sssh] |
---|
240 | oar-parexec --help |
---|
241 | |
---|
242 | =head1 DESCRIPTION |
---|
243 | |
---|
C<oar-parexec> executes lots of small jobs in parallel inside a cluster.
The number of jobs running in parallel at one time cannot exceed the number of cores listed in the node file.
C<oar-parexec> is easier to use inside an OAR job environment,
which automatically defines these strategic parameters...
248 | |
---|
249 | Option C<--filecmd> is the only mandatory one. |
---|
250 | |
---|
Small jobs are launched in the same folder as the master job.
Two environment variables are defined for each small job,
but only in the case of parallel small jobs (option C<--jobnp> > 1).
254 | |
---|
255 | OAR_NODE_FILE - file that list node for parallel computing |
---|
256 | OAR_NP - number of processor affected |
---|
257 | |
---|
The file defined by OAR_NODE_FILE is created in /tmp on the node
before the small job is launched, and is deleted afterwards...
C<oar-parexec> is a simple script, so
OAR_NODE_FILE will not be deleted if the master job crashes.
262 | |
---|
OAR defines other variables that are equivalent to OAR_NODE_FILE:
OAR_NODEFILE, OAR_FILE_NODES, OAR_RESOURCE_FILE...
You can use the original OAR ressource file in your script
through these variables if you need it.
267 | |
---|
268 | |
---|
269 | =head1 OPTIONS |
---|
270 | |
---|
271 | =over 12 |
---|
272 | |
---|
273 | =item B<-f|--filecmd filecommand> |
---|
274 | |
---|
275 | File name which content job list. |
---|
276 | |
---|
277 | =item B<-l|--logtrace tracefile> |
---|
278 | |
---|
File which logs and traces running jobs.
When running the same command again (after a crash, for example),
only jobs that are not marked as done will be run again.
Be careful: jobs marked as running (started but not finished) will be run again.

This option is very useful in case of a crash,
but also for checkpointing and idempotent OAR jobs.
286 | |
---|
287 | =item B<-v|--verbose> |
---|
288 | |
---|
289 | =item B<-j|--jobnp integer> |
---|
290 | |
---|
Number of processors to allocate for each small job,
1 by default.
293 | |
---|
294 | =item B<-n|--nodefile filenode> |
---|
295 | |
---|
File name that lists all the nodes on which jobs may be launched.
By default, it is defined automatically by OAR via the
environment variable C<OAR_NODE_FILE>.
299 | |
---|
300 | For example, if you want to use 6 core on your cluster node, |
---|
301 | you need to put 6 times the hostname node in this file, |
---|
302 | one per line... |
---|
303 | It's a very common file in MPI process ! |
---|
304 | |
---|
305 | =item B<-m|--masterio basefileio> |
---|
306 | |
---|
The C<basefileio> will be used in place of the environment variables
C<OAR_STDOUT> and C<OAR_STDERR> (without extension) to build the base name of the small job standard output
(only used when option C<switchio> is activated).
310 | |
---|
311 | =item B<-s|--switchio> |
---|
312 | |
---|
Each small job will have its own STDOUT and STDERR output files,
based on the master OAR job's files with C<JOB_NUM> inside
(or based on C<basefileio> if option C<masterio> is given).
316 | Example : |
---|
317 | |
---|
318 | OAR.151524.stdout -> OAR.151524-JOB_NUM.stdout |
---|
319 | |
---|
where 151524 here is the master C<OAR_JOB_ID>
and C<JOB_NUM> is the small job number.
322 | |
---|
=item B<-o|--oarsh command>

Command used to launch a shell on a node.
326 | By default |
---|
327 | |
---|
328 | oarsh -q -T |
---|
329 | |
---|
330 | =item B<-h|--help> |
---|
331 | |
---|
332 | =back |
---|
333 | |
---|
334 | |
---|
335 | =head1 EXAMPLE |
---|
336 | |
---|
337 | Content for the job file command (option C<--filecmd>) could have: |
---|
338 | |
---|
339 | - empty line |
---|
340 | - comment line begin with # |
---|
341 | - valid shell command |
---|
342 | |
---|
343 | Example where F<$HOME/test/subjob1.sh> is a shell script (executable). |
---|
344 | |
---|
345 | $HOME/test/subjob1.sh |
---|
346 | $HOME/test/subjob2.sh |
---|
347 | $HOME/test/subjob3.sh |
---|
348 | $HOME/test/subjob4.sh |
---|
349 | ... |
---|
350 | $HOME/test/subjob38.sh |
---|
351 | $HOME/test/subjob39.sh |
---|
352 | $HOME/test/subjob40.sh |
---|
353 | |
---|
354 | These jobs could be launch by |
---|
355 | |
---|
356 | oarsub -n test -l /core=6,walltime=00:35:00 "oar-parexec -f ./subjob.list.txt" |
---|
357 | |
---|
358 | |
---|
359 | =head1 SEE ALSO |
---|
360 | |
---|
361 | oar-dispatch, mpilauncher |
---|
362 | |
---|
363 | |
---|
364 | =head1 AUTHORS |
---|
365 | |
---|
366 | Written by Gabriel Moreau, Grenoble - France |
---|
367 | |
---|
368 | |
---|
369 | =head1 LICENSE AND COPYRIGHT |
---|
370 | |
---|
371 | GPL version 2 or later and Perl equivalent |
---|
372 | |
---|
373 | Copyright (C) 2011 Gabriel Moreau / LEGI - CNRS UMR 5519 - France |
---|
374 | |
---|