source: trunk/oarutils/oar-parexec @ 93

Last change on this file since 93 was 89, checked in by g7moreau, 12 years ago
  • Example of transmit use
File size: 19.3 KB
RevLine 
[13]1#!/usr/bin/perl
2#
3# 2011/11/27 gabriel
4
5use strict;
6
7use Getopt::Long();
8use Pod::Usage;
9use Coro;
10use Coro::Semaphore;
11use Coro::Signal;
12use Coro::Channel;
13use Coro::Handle;
14use IO::File;
15use POSIX qw( WNOHANG WEXITSTATUS );
[32]16use Cwd qw( getcwd );
[13]17
# Command line options (file-scoped: the rest of the script reads them).
my $file;                          # job list file (--file)
my $dir;                           # folder to iterate (--dir), used with --cmd
my $cmd;                           # command run in every sub-folder of --dir
my $logtrace;                      # trace file for re-run/checkpoint support
my $verbose;
my $job_np         = 1;            # number of cores per small job
my $nodefile       = $ENV{OAR_NODE_FILE} || '';
my $masterio;                      # base name overriding OAR_STDOUT/OAR_STDERR
my $switchio;                      # one stdout/stderr file per small job
my $help;
my $oarsh          = 'oarsh -q -T';    # remote shell command
my $sig_transmit;                  # forward checkpoint signal to sub-jobs
my $sig_checkpoint = 'USR2';       # signal to listen for (OAR checkpoint)

Getopt::Long::GetOptions(
   'file=s'     => \$file,
   'dir=s'      => \$dir,
   'cmd=s'      => \$cmd,
   'logtrace=s' => \$logtrace,
   'verbose'    => \$verbose,
   'help'       => \$help,
   'oarsh=s'    => \$oarsh,
   'jobnp=i'    => \$job_np,
   'nodefile=s' => \$nodefile,
   'masterio=s' => \$masterio,
   'switchio'   => \$switchio,
   'transmit'   => \$sig_transmit,
   'kill=s'     => \$sig_checkpoint,
   ) || pod2usage(-verbose => 0);
pod2usage(-verbose => 2) if $help;

# Mandatory parameters: either a job list file, or a folder plus a command.
# Guard with defined() so unset options do not interpolate undef into the
# file tests (the script runs without "use warnings", but this keeps the
# check correct should warnings ever be enabled).
pod2usage(-verbose => 2)
   if not ((defined $file and -e $file)
      or (defined $dir and -d $dir and defined $cmd and $cmd ne ''));
[13]52
# Re-run support: parse a previous trace file (--logtrace) and remember the
# state of every job already seen, so finished jobs are not launched again.
my %state;                       # job name -> 'start' or 'end'
my $log_h = IO::File->new();
if (-e "$logtrace") {
   $log_h->open("< $logtrace")
      or die "error: can't read log file: $!";
   while (<$log_h>) {
      # Trace lines look like "start job NAME ..." / "end job NAME ...";
      # an 'end' line overwrites the earlier 'start' for the same job.
      $state{$1} = 'start' if m/^start\s+job\s+([^\s]+)\s/;
      $state{$1} = 'end'   if m/^end\s+job\s+([^\s]+)\s/;
      }
   $log_h->close();
   }
if ($logtrace) {
   # Re-open the trace file in append mode to record this run.
   $log_h->open(">> $logtrace")
      or die "error: can't append log file $logtrace: $!";
   $log_h->autoflush;
   # Wrap in a Coro-aware handle so prints from coroutines do not block
   # the whole scheduler.
   $log_h = unblock $log_h;
   }
71
# Build the job list, either from the job file (--file) or by iterating
# over the sub-folders of --dir (one job per sub-folder, running --cmd).
my @job = ();
if (-e "$file") {
   my $job_num = 0;
   # Lexical filehandle + checked 3-arg open (was a bareword handle).
   open my $job_list, '<', $file or die "error: can't open job file $file: $!";
   while (my $job_cmd = <$job_list>) {
      chomp $job_cmd;
      next if $job_cmd =~ m/^#/;       # comment line
      next if $job_cmd =~ m/^\s*$/;    # empty line
      $job_num++;
      # Optional explicit name in a trailing comment: "... # name=JOBNAME".
      my ($job_name) = $job_cmd =~ m/#.*?\bname=(\S+?)\b/i;
      $job_name ||= $job_num;          # fall back to the sequence number
      push @job, {
         name => $job_name,
         cmd  => "$job_cmd",
         num  => $job_num,
         };
      }
   close $job_list;
   }
else {
   my $job_num = 0;
   # Lexical directory handle (was a bareword handle).
   opendir my $dir_h, $dir or die "error: can't open folder $dir: $!";
   while (my $item = readdir $dir_h) {
      next if $item =~ m/^\./;         # hidden entries
      next if $item =~ m/:/;
      next if $item =~ m/\.old$/;      # backup/disabled folders are skipped
      next if $item =~ m/\.sav$/;
      next if $item =~ m/\.bak$/;
      next if $item =~ m/\.no$/;
      next unless (-d "$dir/$item");   # only sub-folders become jobs
      $job_num++;
      push @job, {
         name => $item,                # the sub-folder name is the job name
         cmd  => "cd $dir/$item/; $cmd",
         num  => $job_num,
         };
      }
   closedir $dir_h;
   }
[13]112
# Keep explicit job names only when they are all unique; otherwise fall
# back to the sequence number for every job so the logtrace is unambiguous.
{
   my %seen = ();
   my $count_unique_name = grep { !$seen{ $_->{name} }++ } @job;

   # BUG FIX: the unique-name count must be compared with the number of
   # jobs (scalar @job), not with the last index ($#job == @job - 1).
   # The old test "!= $#job" was always true when every name was unique,
   # silently replacing all explicit names with their numbers.
   if ($count_unique_name != scalar(@job)) {
      $_->{name} = $_->{num} for @job;
      }
   }
121
# Ressources available: one line per core/slot in the node file.
my @ressources = ();
# Lexical filehandle (was a bareword handle).
open my $node_h, '<', $nodefile
   or die "can't open $nodefile: $!";
while (<$node_h>) {
   chomp;
   next if m/^#/;       # comment line
   next if m/^\s*$/;    # empty line
   push @ressources, $_;
   }
close $node_h;

my $ressource_size = scalar(@ressources);
# A single small job cannot need more cores than the node file provides.
# (typo fix in message: "enought" -> "enough")
die "error: not enough ressources jobnp $job_np > ressources $ressource_size"
   if $job_np > $ressource_size;
[34]137
my $current_dir = getcwd();

# Base names used to build per-job stdout/stderr files (--switchio):
# either the explicit --masterio value, or the OAR master output files
# with their .stdout/.stderr extension stripped.
my ($stdout, $stderr);
if ($masterio) {
   $stdout = $masterio;
   $stderr = $masterio;
   }
else {
   $stderr = $ENV{OAR_STDERR} || '';
   $stderr =~ s/\.stderr$//;
   $stdout = $ENV{OAR_STDOUT} || '';
   $stdout =~ s/\.stdout$//;
   }
[13]146
# Global synchronization objects.
my $finished = Coro::Signal->new;          # fired when everything is done
my $job_todo = Coro::Semaphore->new(0);    # counts jobs still to complete
# (indirect-object "new Class" syntax replaced with Class->new)

# Longest job name, used to align the start/end log lines.
# Initialized to 0 (was undef) so the first comparison is warning-free.
my $job_name_maxlen = 0;
for (@job) {
   $job_todo->up;
   $job_name_maxlen = length($_->{name}) if length($_->{name}) > $job_name_maxlen;
   }

# Slice the ressources in groups of $job_np cores: each channel entry is a
# comma-separated core list handed to exactly one running small job.
my $ressources = Coro::Channel->new;
for my $slot (1 .. int($ressource_size / $job_np)) {
   $ressources->put(
      join(',',
         @ressources[ (($slot - 1) * $job_np) .. (($slot * $job_np) - 1) ])
         );
   }
163
# Sub-jobs currently running: shell PID -> { fh, node_connect, ressource,
# name, pidfile }.  Filled by the start coroutine, reaped by the end one.
my %scheduled = ();

# OAR checkpoint and default signal SIGUSR2
my $oar_checkpoint = new Coro::Semaphore 0;
my $notify         = new Coro::Signal;
$SIG{$sig_checkpoint} = sub {
   print "warning: receive checkpoint at "
      . time
      . ", no new job, just finishing running job\n"
      if $verbose;
   # Keep the handler minimal: bump a counter the coroutines poll, and
   # wake the notify coroutine when --transmit is active.  The real work
   # happens outside signal context.
   $oar_checkpoint->up();
   $notify->send if $sig_transmit;
   };
[39]177
# Asynchronous notifier: when the checkpoint signal arrives with --transmit
# active, forward that signal to every running sub-job through a remote shell.
async {
   while () {
      $notify->wait;

      for my $job_pid (keys %scheduled) {
         my $job_name     = $scheduled{$job_pid}->{name};
         my $job_pidfile  = $scheduled{$job_pid}->{pidfile};
         my $node_connect = $scheduled{$job_pid}->{node_connect};

         # One-shot remote shell on the node running this sub-job.
         my $fh = IO::File->new();
         $fh->open("| $oarsh $node_connect >/dev/null 2>&1")
            or die "error: can't notify subjob: $!";

         $fh->autoflush;
         $fh = unblock $fh;

         # The sub-job wrote its shell PID into $job_pidfile at startup
         # (see the start coroutine, --transmit branch).
         $fh->print("kill -$sig_checkpoint \$(cat $job_pidfile)\n");
         $fh->print("exit\n");

         print "warning: transmit signal $sig_checkpoint"
            . " to job $job_name on node $node_connect.\n"
            if $verbose;

         close $fh;
         cede;
         }
      }
   }
207
# Asynchronous launcher: take one free ressource slice per job, open a
# remote shell on its first node and feed it a small generated script that
# runs the job, waits for it, then cleans up its temporary files.
async {
   JOB:
   for my $job (@job) {
      my $job_name   = $job->{name};
      my $job_cmd    = $job->{cmd};

      # job has been already run ?
      if (exists $state{$job_name}) {
         if ($state{$job_name} eq 'start') {
            print "warning: job $job_name was not clearly finished, relaunching...\n"
               if $verbose;
            }
         elsif ($state{$job_name} eq 'end') {
            delete $state{$job_name}; # free memory
            $job_todo->down;
            print "warning: job $job_name already run\n" if $verbose;
            cede;
            next JOB;
            }
         }

      # Take a job ressource slice (blocks until one is free).
      my $job_ressource = $ressources->get;

      # No new job is launched once an OAR checkpoint has been received.
      last JOB if $oar_checkpoint->count() > 0;

      # Connect to the first node of the slice; $job_pid is the PID of the
      # local pipe/oarsh process, used later by waitpid in the end block.
      my ($node_connect) = split ',', $job_ressource;
      my $fh = IO::File->new();
      my $job_pid = $fh->open("| $oarsh $node_connect >/dev/null 2>&1")
         or die "error: can't start subjob: $!";

      $fh->autoflush;
      $fh = unblock $fh;

      my $msg = sprintf "start job %${job_name_maxlen}s / %5i at %s on node %s\n",
         $job_name, $job_pid, time, $job_ressource;
      $log_h->print($msg) if $logtrace;
      print($msg) if $verbose;

      # Per-job output redirections when --switchio is active.
      my ($job_stdout, $job_stderr);
      $job_stdout = ">  $stdout-$job_name.stdout" if $stdout ne '' and $switchio;
      $job_stderr = "2> $stderr-$job_name.stderr" if $stderr ne '' and $switchio;

      # Temporary files on the remote node: a private node file for
      # parallel sub-jobs, and a PID file for signal forwarding.
      my $job_nodefile = "/tmp/oar-parexec-$ENV{LOGNAME}-$ENV{OAR_JOB_ID}-$job_name";
      my $job_pidfile  = "/tmp/oar-parexec-$ENV{LOGNAME}-$ENV{OAR_JOB_ID}-$job_name.pid";

      # Register the sub-job so the end and notify coroutines can find it.
      $scheduled{$job_pid} = {
         fh           => $fh,
         node_connect => $node_connect,
         ressource    => $job_ressource,
         name         => $job_name,
         pidfile      => $job_pidfile,
         };

      # set job environment, run it and clean
      if ($job_np > 1) {
         # Expose the slice as a private OAR_NODE_FILE for MPI/OpenMP jobs.
         $fh->print("printf \""
               . join('\n', split(',', $job_ressource,))
               . "\" > $job_nodefile\n");
         $fh->print("OAR_NODE_FILE=$job_nodefile\n");
         $fh->print("OAR_NP=$job_np\n");
         $fh->print("export OAR_NODE_FILE\n");
         $fh->print("export OAR_NP\n");
         $fh->print("unset OAR_MSG_NODEFILE\n");
         }

      $fh->print("cd $current_dir\n");

      if ($sig_transmit) {
         # Relay the checkpoint signal to the job's process tree, and
         # record the remote shell PID for the notify coroutine.
         $fh->print("trap 'jobs -p|xargs -r ps -o pid --no-headers --ppid|xargs -r kill -$sig_checkpoint' $sig_checkpoint\n");
         $fh->print("echo \$\$ > $job_pidfile\n");
         }

      # Run the job in a background subshell, then wait in a loop: a plain
      # wait would return early when the trap above fires.
      $fh->print("(\n");
      $fh->print("$job_cmd\n");
      $fh->print(") $job_stdout $job_stderr \&\n");
      $fh->print("while [ \$(jobs -p | wc -l) -gt 0 ]\n");
      $fh->print("do\n");
      $fh->print("   wait\n");
      $fh->print("done\n");

      # Clean the temporary files and end the remote shell.
      $fh->print("rm -f $job_pidfile\n")  if $sig_transmit;
      $fh->print("rm -f $job_nodefile\n") if $job_np > 1;
      $fh->print("exit\n");
      cede;
      }
   }
297
# Asynchronous reaper: poll the running sub-jobs, log their completion,
# recycle their ressource slice and decide when everything is finished.
async {
   while () {
      for my $job_pid (keys %scheduled) {
         # Non-blocking test of the remote shell PID.
         if (waitpid($job_pid, WNOHANG)) {
            my $msg = sprintf "end   job %${job_name_maxlen}s / %5i at %s on node %s\n",
               $scheduled{$job_pid}->{name},
               $job_pid, time, $scheduled{$job_pid}->{ressource};

            # Job not finished, just suspended if a checkpoint signal was
            # received and transmitted: log it as such so the next run
            # relaunches it.
            $msg =~ s/^end\s+job/suspend job/
               if $sig_transmit and $oar_checkpoint->count() > 0;

            $log_h->print($msg) if $logtrace;
            print($msg) if $verbose;
            close $scheduled{$job_pid}->{fh};
            # Leave the ressource slice for another job.
            $ressources->put($scheduled{$job_pid}->{ressource});
            $job_todo->down;
            delete $scheduled{$job_pid};
            }
         cede;
         }

      # Checkpointing: finish the running jobs only, then quit.
      $finished->send if $oar_checkpoint->count() > 0 and scalar(keys(%scheduled)) == 0;

      # Normal termination: every job has been done.
      $finished->send if $job_todo->count() == 0;
      cede;
      }
   }

cede;

# Block the main coroutine until one of the conditions above fires.
$finished->wait;

# close log trace file
$log_h->close() if $logtrace;
[38]338
[13]339__END__
340
341=head1 NAME
342
[88]343oar-parexec - parallel execution of many small short or long job
[13]344
345=head1 SYNOPSIS
346
[47]347 oar-parexec --file filecommand \
348    [--logtrace tracefile] [--verbose] \
349    [--jobnp integer] [--nodefile filenode] [--oarsh sssh] \
[88]350    [--switchio] [--masterio basefileio] \
351    [--kill signal] [--transmit]
[46]352
[47]353 oar-parexec --dir foldertoiterate --cmd commandtolaunch \
354    [--logtrace tracefile] [--verbose] \
355    [--jobnp integer] [--nodefile filenode] [--oarsh sssh] \
[88]356    [--switchio] [--masterio basefileio] \
357    [--kill signal] [--transmit]
[46]358
[13]359 oar-parexec --help
360
[32]361=head1 DESCRIPTION
362
C<oar-parexec> can execute a lot of small short or long jobs in parallel inside a cluster.
The number of parallel jobs running at one time cannot exceed the number of cores defined in the node file.
[32]365C<oar-parexec> is easier to use inside an OAR job environment
[44]366which define automatically these strategics parameters...
367However, it can be used outside OAR.
[32]368
[47]369Option C<--file> or C<--dir> and C<--cmd> are the only mandatory parameters.
[32]370
371Small job will be launch in the same folder as the master job.
[44]372Two environment variable are defined for each small job
[37]373and only in case of parallel small job (option C<--jobnp> > 1).
[32]374
[34]375 OAR_NODE_FILE - file that list node for parallel computing
376 OAR_NP        - number of processor affected
[32]377
[44]378The file define by OAR_NODE_FILE is created  in /tmp
379on the node before launching the small job
380and this file will be delete after job complete.
[34]381C<oar-parexec> is a simple script,
382OAR_NODE_FILE will not be deleted in case of crash of the master job.
383
[37]384OAR define other variable that are equivalent to OAR_NODE_FILE:
385OAR_NODEFILE, OAR_FILE_NODES, OAR_RESOURCE_FILE...
386You can use in your script the OAR original file ressources
387by using these variable if you need it.
[34]388
When used with long jobs,
activate option C<--transmit> to send the OAR checkpoint signal
and suspend small jobs before the walltime cut!
[82]392
[13]393=head1 OPTIONS
394
[32]395=over 12
[13]396
[47]397=item B<-f|--file filecommand>
[13]398
[32]399File name which content job list.
[45]400For the JOB_NAME definition,
401the first valid job in the list will have the number 1 and so on...
[13]402
[77]403It's possible to fix the name inside a comment on the job line.
404For example:
405
406 $HOME/test/subjob1.sh # name=subjob1
407
408The key C<name> is case insensitive,
409the associated value cannot have a space...
410
[88]411The command can be any shell command.
412It's possible to change folder,
413or launch an asynchrone job in parallel,
414but one command must block and not be launch in asynchrone (with & or coproc).
415Example :
416
417 cd ./test; ./subjob1.sh
418 cd ./test; nice -18 du -sk ./ & ./test/subjob1.sh
419
420Command C<du -sk ./> will be done in parallel on the same ressource...
421
[47]422=item B<-d|--dir foldertoiterate>
[45]423
424Command C<--cmd> will be launch in all sub-folder of this master folder.
425Files in this folder will be ignored.
[47]426Sub-folder name which begin with F<.>
427or finish with F<.old>, F<.sav>, F<.bak>, F<.no> will either be ignored...
[45]428
429The JOB_NAME is simply the Sub-folder name.
430
431=item B<-c|--cmd commandtolaunch>
432
Command (and its arguments) that will be launched in all sub-folders
of the folder given by parameter C<--dir>.
435Like for option C<--file>, command can be any valid shell command
436but one must block.
[45]437
[43]438=item B<-l|--logtrace tracefile>
439
440File which log and trace running job.
[44]441In case of running the same master command (after crash for example),
442only job that are not mark as done will be run again.
443Be careful, job mark as running (start but not finish) will be run again.
[45]444Tracing is base on the JOB_NAME between multiple run.
[43]445
446This option is very usefull in case of crash
447but also for checkpointing and idempotent OAR job.
448
[32]449=item B<-v|--verbose>
[13]450
[34]451=item B<-j|--jobnp integer>
[13]452
[34]453Number of processor to allocated for each small job.
4541 by default.
455
456=item B<-n|--nodefile filenode>
457
[44]458File name that list all the node where job could be launch.
[32]459By defaut, it's define automatically by OAR via
460environment variable C<OAR_NODE_FILE>.
[13]461
[32]462For example, if you want to use 6 core on your cluster node,
463you need to put 6 times the hostname node in this file,
464one per line...
465It's a very common file in MPI process !
[13]466
[46]467=item B<-o|-oarsh command>
[13]468
[46]469Command use to launch a shell on a node.
470By default
[13]471
[46]472 oarsh -q -T
473
474Change it to C<ssh> if you are not using an OAR cluster...
475
[32]476=item B<-s|--switchio>
[21]477
[32]478Each small job will have it's own output STDOUT and STDERR
[45]479base on master OAR job with C<JOB_NAME> inside
[32]480(or base on C<basefileio> if option C<masterio>).
481Example :
[21]482
[45]483 OAR.151524.stdout -> OAR.151524-JOB_NAME.stdout
[21]484
[32]485where 151524 here is the master C<OAR_JOB_ID>
[45]486and C<JOB_NAME> is the small job name.
[21]487
[46]488=item B<-m|--masterio basefileio>
[32]489
The C<basefileio> will be used in place of the environment variables
C<OAR_STDOUT> and C<OAR_STDERR> (without extension) to build the base name of the small job standard output
(only used when option C<--switchio> is activated).
[32]493
[78]494=item B<-k|--kill signal>
495
496Signal to listen and make a clean stop of the current C<oar-parexec> process.
By default, the USR2 signal is used (see C<kill -l> for a list of possible signals).
498
499=item B<-t|--transmit>
500
Resend the caught signal to sub-jobs when receiving it.
By default, no signal is transmitted to child processes.
503
504It's only valuable if use for long sub-job than can
505in return make themselves a clean restart.
506
507
[32]508=item B<-h|--help>
509
510=back
511
512
513=head1 EXAMPLE
514
[44]515=head2 Simple list of sequential job
516
[47]517Content for the job file command (option C<--file>) could have:
[21]518
[13]519 - empty line
520 - comment line begin with #
[86]521 - valid shell command (can containt comment)
[13]522
523Example where F<$HOME/test/subjob1.sh> is a shell script (executable).
524
[86]525 $HOME/test/subjob01.sh  # name=subjob01
526 $HOME/test/subjob02.sh  # name=subjob02
527 $HOME/test/subjob03.sh  # name=subjob03
528 $HOME/test/subjob04.sh  # name=subjob04
[32]529 ...
[86]530 $HOME/test/subjob38.sh  # name=subjob38
531 $HOME/test/subjob39.sh  # name=subjob39
532 $HOME/test/subjob40.sh  # name=subjob40
[13]533
[44]534These jobs could be launch by:
[13]535
[49]536 oarsub -n test -l /core=6,walltime=04:00:00 \
537   "oar-parexec -f ./subjob.list.txt"
[13]538
[47]539=head2 Folder job
540
In a folder F<subjob.d>, create sub-folders with your data inside: F<test1>, F<test2>...
542The same command will be executed in every sub-folder.
543C<oar-parexec> change the current directory to the sub-folder before launching it.
544
545A very simple job could be:
546
[49]547 oarsub -n test -l /core=6,walltime=04:00:00 \
548   "oar-parexec -d ./subjob.d -c 'sleep 10; env'"
[47]549
The command C<env> will be executed in all folders F<test1>, F<test2>... after a 10s pause.
551
552Sometime, it's simpler to use file list command,
553sometime, jobs by folder with the same command run is more relevant.
554
[44]555=head2 Parallel job
[28]556
[44]557You need to put the number of core each small job need with option C<--jobnp>.
558If your job is build on OpenMP or MPI,
559you can use OAR_NP and OAR_NODE_FILE variables to configure them.
560On OAR cluster, you need to use C<oarsh> or a wrapper like C<oar-envsh>
561for connexion between node instead of C<ssh>.
562
563Example with parallel small job on 2 core:
564
[49]565 oarsub -n test -l /core=6,walltime=04:00:00 \
566   "oar-parexec -j 2 -f ./subjob.list.txt"
[44]567
568=head2 Tracing and master crash
569
570If the master node crash after hours of calculus, everything is lost ?
571No, with option C<--logtrace>,
572it's possible to remember older result
573and not re-run these job the second and next time.
574
[49]575 oarsub -n test -l /core=6,walltime=04:00:00 \
576   "oar-parexec -f ./subjob.list.txt -l ./subjob.list.log"
[44]577
578After a crash or an C<oardel> command,
579you can then re-run the same command that will end to execute the jobs in the list
580
[49]581 oarsub -n test -l /core=6,walltime=04:00:00 \
582   "oar-parexec -f ./subjob.list.txt -l ./subjob.list.log"
[44]583
584C<logtrace> file are just plain file.
585We use the extension '.log' because these files are automatically
586eliminate from our backup system!
587
588=head2 Checkpointing and Idempotent
589
590C<oar-parexec> is compatible with the OAR checkpointing.
[89]591If you have 2000 small jobs that need 55h to be done on 6 cores,
[44]592you can cut this in small parts.
593
594For this example, we suppose that each small job need about 10min...
595So, we send a checkpoint 12min before the end of the process
596to let C<oar-parexec> finish the jobs started.
597After being checkpointed, C<oar-parexec> do not start any new small job.
598
[49]599 oarsub -t idempotent -n test \
600   -l /core=6,walltime=04:00:00 \
601   --checkpoint 720 \
[44]602   "oar-parexec -f ./subjob.list.txt -l ./subjob.list.log"
603
604After 3h48min, the OAR job will begin to stop launching new small job.
605When all running small job are finished, it's exit.
606But as the OAR job is type C<idempotent>,
607OAR will re-submit it as long as all small job are not executed...
608
609This way, we let other users a chance to use the cluster!
610
611In this last exemple, we use moldable OAR job with idempotent
612to reserve many core for a small time or a few cores for a long time:
613
614 oarsub -t idempotent -n test \
615   -l /core=50,walltime=01:05:00 \
616   -l /core=6,walltime=04:00:00 \
617   --checkpoint 720 \
618   "oar-parexec -f ./subjob.list.txt -l ./subjob.list.log"
619
[78]620=head2 Signal, recurse and long job
[44]621
[78]622By default, OAR use signal USR2 for checkpointing.
[79]623It's possible to change this with option C<--kill>.
[78]624
625When use with long small job, checkpointing could be too long...
[79]626More than walltime!
627The option C<--transmit> could be use to checkpoint small job!
628These long small job will then stop cleanly and will be restarted next time.
[78]629
630In the C<logtrace> file, small job will have the status suspend.
[79]631They will be launch with the same command line at the next OAR run.
[78]632
[89]633Example: if you have 50 small jobs that each need 72h to be done on 1 cores,
634you can cut this in 24h parts.
635
636For this example, we suppose that each long job loop need about 20min...
637So, we send a checkpoint 30min before the end of the process
638to let C<oar-parexec> suspend the jobs started.
639After being checkpointed, C<oar-parexec> do not start any new small job.
640
641 oarsub -t idempotent -n test \
642   -l /core=6,walltime=24:00:00 \
643   --checkpoint 1800 \
644   --transmit \
645   "oar-parexec -f ./subjob.list.txt -l ./subjob.list.log"
646
647After 23h30min, the OAR job will begin to stop launching new small job.
648When all running small job are suspend, it's exit.
649But as the OAR job is type C<idempotent>,
650OAR will re-submit it as long as all small job are not finished...
651
[21]652=head1 SEE ALSO
653
[44]654oar-dispatch, mpilauncher,
655orsh, oar-envsh, ssh
[21]656
657
[13]658=head1 AUTHORS
659
[21]660Written by Gabriel Moreau, Grenoble - France
[13]661
[21]662
663=head1 LICENSE AND COPYRIGHT
664
665GPL version 2 or later and Perl equivalent
666
[28]667Copyright (C) 2011 Gabriel Moreau / LEGI - CNRS UMR 5519 - France
[21]668
Note: See TracBrowser for help on using the repository browser.