Source Code for Module madgraph.interface.amcatnlo_run_interface

################################################################################
#
# Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
"""A user-friendly command line interface to access MadGraph5_aMC@NLO features.
   Uses the cmd package for command interpretation and tab completion.
"""
from __future__ import division

import atexit
import glob
import logging
import math
import optparse
import os
import pydoc
import random
import re
import shutil
import subprocess
import sys
import traceback
import time
import signal
import tarfile
import copy
import datetime
import StringIO
try:
    import cPickle as pickle
except ImportError:
    import pickle

try:
    import readline
    GNU_SPLITTING = ('GNU' in readline.__doc__)
except:
    GNU_SPLITTING = True

root_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
root_path = os.path.split(root_path)[0]
sys.path.insert(0, os.path.join(root_path, 'bin'))

# useful shortcut
pjoin = os.path.join
# Special logger for the Cmd Interface
logger = logging.getLogger('madgraph.stdout')  # -> stdout
logger_stderr = logging.getLogger('madgraph.stderr')  # -> stderr

try:
    import madgraph
except ImportError:
    aMCatNLO = True
    import internal.extended_cmd as cmd
    import internal.common_run_interface as common_run
    import internal.banner as banner_mod
    import internal.misc as misc
    from internal import InvalidCmd, MadGraph5Error
    import internal.files as files
    import internal.cluster as cluster
    import internal.save_load_object as save_load_object
    import internal.gen_crossxhtml as gen_crossxhtml
    import internal.sum_html as sum_html
    import internal.shower_card as shower_card
    import internal.FO_analyse_card as analyse_card
    import internal.lhe_parser as lhe_parser
else:
    # import from madgraph directory
    aMCatNLO = False
    import madgraph.interface.extended_cmd as cmd
    import madgraph.interface.common_run_interface as common_run
    import madgraph.iolibs.files as files
    import madgraph.iolibs.save_load_object as save_load_object
    import madgraph.madevent.gen_crossxhtml as gen_crossxhtml
    import madgraph.madevent.sum_html as sum_html
    import madgraph.various.banner as banner_mod
    import madgraph.various.cluster as cluster
    import madgraph.various.misc as misc
    import madgraph.various.shower_card as shower_card
    import madgraph.various.FO_analyse_card as analyse_card
    import madgraph.various.lhe_parser as lhe_parser
    from madgraph import InvalidCmd, aMCatNLOError, MadGraph5Error, MG5DIR
class aMCatNLOError(Exception):
    pass


def compile_dir(*arguments):
    """compile the directory p_dir
    arguments is the tuple (me_dir, p_dir, mode, options, tests, exe, run_mode)
    this function need not be a class method in order to do
    the compilation on multicore"""

    if len(arguments) == 1:
        (me_dir, p_dir, mode, options, tests, exe, run_mode) = arguments[0]
    elif len(arguments) == 7:
        (me_dir, p_dir, mode, options, tests, exe, run_mode) = arguments
    else:
        raise aMCatNLOError, 'Wrong number of arguments'
    logger.info(' Compiling %s...' % p_dir)

    this_dir = pjoin(me_dir, 'SubProcesses', p_dir)

    try:
        # compile everything and run the tests
        for test in tests:
            # skip check_poles for LOonly dirs
            if test == 'check_poles' and os.path.exists(pjoin(this_dir, 'parton_lum_0.f')):
                continue
            if test == 'test_ME' or test == 'test_MC':
                test_exe = 'test_soft_col_limits'
            else:
                test_exe = test
            misc.compile([test_exe], cwd=this_dir, job_specs=False)
            input = pjoin(me_dir, '%s_input.txt' % test)
            # this can be improved/better written to handle the output
            misc.call(['./%s' % (test_exe)], cwd=this_dir,
                      stdin=open(input), stdout=open(pjoin(this_dir, '%s.log' % test), 'w'),
                      close_fds=True)
            if test == 'check_poles' and os.path.exists(pjoin(this_dir, 'MadLoop5_resources')):
                tf = tarfile.open(pjoin(this_dir, 'MadLoop5_resources.tar.gz'), 'w:gz',
                                  dereference=True)
                tf.add(pjoin(this_dir, 'MadLoop5_resources'), arcname='MadLoop5_resources')
                tf.close()

        if not options['reweightonly']:
            misc.compile(['gensym'], cwd=this_dir, job_specs=False)
            misc.call(['./gensym', mode], cwd=this_dir,
                      stdout=open(pjoin(this_dir, 'gensym.log'), 'w'),
                      close_fds=True)
            # compile madevent_mintMC/mintFO
            misc.compile([exe], cwd=this_dir, job_specs=False)
        if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
            misc.compile(['reweight_xsec_events'], cwd=this_dir, job_specs=False)

        logger.info(' %s done.' % p_dir)
        return 0
    except MadGraph5Error, msg:
        return msg

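# Usage sketch (illustrative only, not part of the module): because
# compile_dir accepts either a single 7-tuple or seven positional arguments,
# it can be dispatched as-is to worker processes for multicore compilation.
# The directory names and option values below are hypothetical.
#
#     import multiprocessing
#
#     jobs = [(me_dir, p_dir, 'aMC@NLO', options, ['test_ME'],
#              'madevent_mintMC', 0)
#             for p_dir in ['P0_gg_ttx', 'P0_uux_ttx']]
#     pool = multiprocessing.Pool(processes=2)
#     results = pool.map(compile_dir, jobs)  # each worker unpacks its tuple
#     pool.close()
#     pool.join()
#     # compile_dir returns 0 on success, or the MadGraph5Error message string
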
def check_compiler(options, block=False):
    """check that the current fortran compiler is gfortran 4.6 or later.
    If block, stops the execution, otherwise just print a warning"""

    msg = 'In order to be able to run MadGraph5_aMC@NLO at NLO, you need to have ' + \
          'gfortran 4.6 or later installed.\n%s has been detected.\n' + \
          'Note that you can still run MadEvent (at LO) without any problem!'
    # first check that gfortran is installed
    if options['fortran_compiler']:
        compiler = options['fortran_compiler']
    elif misc.which('gfortran'):
        compiler = 'gfortran'
    else:
        compiler = ''

    if 'gfortran' not in compiler:
        if block:
            raise aMCatNLOError(msg % compiler)
        else:
            logger.warning(msg % compiler)
    else:
        curr_version = misc.get_gfortran_version(compiler)
        # compare the version numerically: a plain string comparison would
        # wrongly reject e.g. gfortran 10.x ('10...' < '46' as a string)
        try:
            version = [int(v) for v in curr_version.split('.')[:2]]
        except ValueError:
            version = [0]
        if version < [4, 6]:
            if block:
                raise aMCatNLOError(msg % (compiler + ' ' + curr_version))
            else:
                logger.warning(msg % (compiler + ' ' + curr_version))

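# Example behaviour (illustrative): with options = {'fortran_compiler': None}
# and only gfortran 4.4 available in the PATH, check_compiler(options) logs
# the warning above and returns, while check_compiler(options, block=True)
# raises aMCatNLOError. gfortran 4.6+ (including 10.x, thanks to the numeric
# comparison) passes silently.
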

#===============================================================================
# CmdExtended
#===============================================================================
class CmdExtended(common_run.CommonRunCmd):
    """Particularisation of the cmd command for aMCatNLO"""

    # suggested list of commands
    next_possibility = {
        'start': [],
    }

    debug_output = 'ME5_debug'
    error_debug = 'Please report this bug on https://bugs.launchpad.net/mg5amcnlo\n'
    error_debug += 'More information is found in \'%(debug)s\'.\n'
    error_debug += 'Please attach this file to your report.'

    config_debug = 'If you need help with this issue, please, contact us on https://answers.launchpad.net/mg5amcnlo\n'

    keyboard_stop_msg = """stopping all operations
            in order to quit MadGraph5_aMC@NLO please enter exit"""

    # Define the Error
    InvalidCmd = InvalidCmd
    ConfigurationError = aMCatNLOError

    def __init__(self, me_dir, options, *arg, **opt):
        """Init history and line continuation"""

        # Tag allowing/forbidding questions
        self.force = False

        # If possible, build an info line with current version number
        # and date, from the VERSION text file
        info = misc.get_pkg_info()
        info_line = ""
        if info and 'version' in info and 'date' in info:
            len_version = len(info['version'])
            len_date = len(info['date'])
            if len_version + len_date < 30:
                info_line = "#*         VERSION %s %s %s         *\n" % \
                            (info['version'],
                             (30 - len_version - len_date) * ' ',
                             info['date'])
        else:
            version = open(pjoin(root_path, 'MGMEVersion.txt')).readline().strip()
            info_line = "#*         VERSION %s %s            *\n" % \
                        (version, (24 - len(version)) * ' ')

        # Create a header for the history file.
        # Remember to fill in time at writeout time!
        self.history_header = \
        '#************************************************************\n' + \
        '#*                    MadGraph5_aMC@NLO                     *\n' + \
        '#*                                                          *\n' + \
        "#*                *                       *                 *\n" + \
        "#*                  *        * *        *                   *\n" + \
        "#*                    * * * * 5 * * * *                     *\n" + \
        "#*                  *        * *        *                   *\n" + \
        "#*                *                       *                 *\n" + \
        "#*                                                          *\n" + \
        "#*                                                          *\n" + \
        info_line + \
        "#*                                                          *\n" + \
        "#*    The MadGraph5_aMC@NLO Development Team - Find us at   *\n" + \
        "#*    https://server06.fynu.ucl.ac.be/projects/madgraph    *\n" + \
        "#*                           and                            *\n" + \
        "#*                 http://amcatnlo.cern.ch                  *\n" + \
        '#*                                                          *\n' + \
        '#************************************************************\n' + \
        '#*                                                          *\n' + \
        '#*              Command File for aMCatNLO                   *\n' + \
        '#*                                                          *\n' + \
        '#*              run as ./bin/aMCatNLO.py filename           *\n' + \
        '#*                                                          *\n' + \
        '#************************************************************\n'

        if info_line:
            info_line = info_line[1:]

        logger.info(\
        "************************************************************\n" + \
        "*                                                          *\n" + \
        "*           W E L C O M E  to  M A D G R A P H 5           *\n" + \
        "*                      a M C @ N L O                       *\n" + \
        "*                                                          *\n" + \
        "*                *                       *                 *\n" + \
        "*                  *        * *        *                   *\n" + \
        "*                    * * * * 5 * * * *                     *\n" + \
        "*                  *        * *        *                   *\n" + \
        "*                *                       *                 *\n" + \
        "*                                                          *\n" + \
        info_line + \
        "*                                                          *\n" + \
        "*    The MadGraph5_aMC@NLO Development Team - Find us at   *\n" + \
        "*                 http://amcatnlo.cern.ch                  *\n" + \
        "*                                                          *\n" + \
        "*               Type 'help' for in-line help.              *\n" + \
        "*                                                          *\n" + \
        "************************************************************")
        super(CmdExtended, self).__init__(me_dir, options, *arg, **opt)

    def get_history_header(self):
        """return the history header"""
        return self.history_header % misc.get_time_info()

    def stop_on_keyboard_stop(self):
        """action to perform to close nicely on a keyboard interrupt"""
        try:
            if hasattr(self, 'cluster'):
                logger.info('rm jobs on queue')
                self.cluster.remove()
            if hasattr(self, 'results'):
                self.update_status('Stop by the user', level=None, makehtml=True, error=True)
                self.add_error_log_in_html(KeyboardInterrupt)
        except:
            pass

    def postcmd(self, stop, line):
        """ Update the status of the run for finishing interactive command """

        # relaxing the tag forbidding questions
        self.force = False

        if not self.use_rawinput:
            return stop

        arg = line.split()
        if len(arg) == 0:
            return stop
        elif str(arg[0]) in ['exit', 'quit', 'EOF']:
            return stop

        try:
            self.update_status('Command \'%s\' done.<br> Waiting for instruction.' % arg[0],
                               level=None, error=True)
        except Exception:
            misc.sprint('self.update_status fails', log=logger)
            pass

    def nice_user_error(self, error, line):
        """If a ME run is currently running add a link in the html output"""

        self.add_error_log_in_html()
        cmd.Cmd.nice_user_error(self, error, line)

    def nice_config_error(self, error, line):
        """If a ME run is currently running add a link in the html output"""

        self.add_error_log_in_html()
        cmd.Cmd.nice_config_error(self, error, line)

    def nice_error_handling(self, error, line):
        """If a ME run is currently running add a link in the html output"""

        self.add_error_log_in_html()
        cmd.Cmd.nice_error_handling(self, error, line)


#===============================================================================
# HelpToCmd
#===============================================================================
class HelpToCmd(object):
    """ The Series of help routines for the aMCatNLOCmd"""

    def help_launch(self):
        """help for launch command"""
        _launch_parser.print_help()

    def help_banner_run(self):
        logger.info("syntax: banner_run Path|RUN [--run_options]")
        logger.info("-- Reproduce a run following a given banner")
        logger.info("   One of the following arguments is required:")
        logger.info("     Path should be the path of a valid banner.")
        logger.info("     RUN should be the name of a run of the current directory")
        self.run_options_help([('-f', 'answer all questions by default'),
                               ('--name=X', 'Define the name associated with the new run')])

    def help_compile(self):
        """help for compile command"""
        _compile_parser.print_help()

    def help_generate_events(self):
        """help for generate_events command;
        just calls help_launch"""
        _generate_events_parser.print_help()

    def help_calculate_xsect(self):
        """help for calculate_xsect command"""
        _calculate_xsect_parser.print_help()

    def help_shower(self):
        """help for shower command"""
        _shower_parser.print_help()

    def help_open(self):
        logger.info("syntax: open FILE")
        logger.info("-- open a file with the appropriate editor.")
        logger.info('   If FILE belongs to index.html, param_card.dat, run_card.dat')
        logger.info('   the path to the last created/used directory is used')

    def run_options_help(self, data):
        if data:
            logger.info('-- local options:')
            for name, info in data:
                logger.info('      %s : %s' % (name, info))

        logger.info("-- session options:")
        logger.info("      Note that those options will be kept for the current session")
        logger.info("      --cluster : Submit to the cluster. Current cluster: %s" % self.options['cluster_type'])
        logger.info("      --multicore : Run in multi-core configuration")
        logger.info("      --nb_core=X : limit the number of cores to use to X.")


#===============================================================================
# CheckValidForCmd
#===============================================================================
class CheckValidForCmd(object):
    """ The Series of check routines for the aMCatNLOCmd"""

    def check_shower(self, args, options):
        """Check the validity of the line. args[0] is the run_directory"""

        if options['force']:
            self.force = True

        if len(args) == 0:
            self.help_shower()
            raise self.InvalidCmd, 'Invalid syntax, please specify the run name'
        if not os.path.isdir(pjoin(self.me_dir, 'Events', args[0])):
            raise self.InvalidCmd, 'Directory %s does not exist' % \
                                   pjoin(os.getcwd(), 'Events', args[0])

        self.set_run_name(args[0], level='shower')
        args[0] = pjoin(self.me_dir, 'Events', args[0])

    def check_plot(self, args):
        """Check the arguments for the plot command
        plot run_name modes"""

        madir = self.options['madanalysis_path']
        td = self.options['td_path']

        if not madir or not td:
            logger.info('Retrying to read the configuration file to find madanalysis/td')
            self.set_configuration()

        madir = self.options['madanalysis_path']
        td = self.options['td_path']

        if not madir:
            error_msg = 'No valid MadAnalysis path set. '
            error_msg += 'Please use the set command to define the path and retry. '
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)
        if not td:
            error_msg = 'No valid path to your topdrawer directory set. '
            error_msg += 'Please use the set command to define the path and retry. '
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        if len(args) == 0:
            if not hasattr(self, 'run_name') or not self.run_name:
                self.help_plot()
                raise self.InvalidCmd('No run name defined. Please add this information.')
            args.append('all')
            return

        if args[0] not in self._plot_mode:
            self.set_run_name(args[0], level='plot')
            del args[0]
            if len(args) == 0:
                args.append('all')
        elif not self.run_name:
            self.help_plot()
            raise self.InvalidCmd('No run name defined. Please add this information.')

        for arg in args:
            if arg not in self._plot_mode and arg != self.run_name:
                self.help_plot()
                raise self.InvalidCmd('unknown option %s' % arg)

    def check_pgs(self, arg):
        """Check the arguments for the pgs command
        syntax: pgs [NAME]
        Note that other options are already removed at this point
        """

        # If no pythia-pgs path
        if not self.options['pythia-pgs_path']:
            logger.info('Retrying to read the configuration file to find the pythia-pgs path')
            self.set_configuration()

        if not self.options['pythia-pgs_path'] or not \
           os.path.exists(pjoin(self.options['pythia-pgs_path'], 'src')):
            error_msg = 'No valid pythia-pgs path set. '
            error_msg += 'Please use the set command to define the path and retry. '
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        tag = [a for a in arg if a.startswith('--tag=')]
        if tag:
            arg.remove(tag[0])
            tag = tag[0][6:]

        if len(arg) == 0 and not self.run_name:
            if self.results.lastrun:
                arg.insert(0, self.results.lastrun)
            else:
                raise self.InvalidCmd('No run name defined. Please add this information.')

        if len(arg) == 1 and self.run_name == arg[0]:
            arg.pop(0)

        if not len(arg) and \
           not os.path.exists(pjoin(self.me_dir, 'Events', 'pythia_events.hep')):
            self.help_pgs()
            raise self.InvalidCmd('''No file pythia_events.hep currently available. Please specify a valid run_name''')

        lock = None
        if len(arg) == 1:
            prev_tag = self.set_run_name(arg[0], tag, 'pgs')
            filenames = misc.glob('events_*.hep.gz', pjoin(self.me_dir, 'Events', self.run_name))

            if not filenames:
                raise self.InvalidCmd('No event file corresponding to %s run with tag %s. ' % (self.run_name, prev_tag))
            else:
                input_file = filenames[0]
                output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep')
                lock = cluster.asyncrone_launch('gunzip', stdout=open(output_file, 'w'),
                                                argument=['-c', input_file],
                                                close_fds=True)
        else:
            if tag:
                self.run_card['run_tag'] = tag
            self.set_run_name(self.run_name, tag, 'pgs')

        return lock

    def check_delphes(self, arg):
        """Check the arguments for the delphes command
        syntax: delphes [NAME]
        Note that other options are already removed at this point
        """

        # If no delphes path
        if not self.options['delphes_path']:
            logger.info('Retrying to read the configuration file to find the delphes path')
            self.set_configuration()

        if not self.options['delphes_path']:
            error_msg = 'No valid delphes path set. '
            error_msg += 'Please use the set command to define the path and retry. '
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        tag = [a for a in arg if a.startswith('--tag=')]
        if tag:
            arg.remove(tag[0])
            tag = tag[0][6:]

        if len(arg) == 0 and not self.run_name:
            if self.results.lastrun:
                arg.insert(0, self.results.lastrun)
            else:
                raise self.InvalidCmd('No run name defined. Please add this information.')

        if len(arg) == 1 and self.run_name == arg[0]:
            arg.pop(0)

        if not len(arg) and \
           not os.path.exists(pjoin(self.me_dir, 'Events', 'pythia_events.hep')):
            self.help_pgs()
            raise self.InvalidCmd('''No file pythia_events.hep currently available. Please specify a valid run_name''')

        if len(arg) == 1:
            prev_tag = self.set_run_name(arg[0], tag, 'delphes')
            filenames = misc.glob('events_*.hep.gz', pjoin(self.me_dir, 'Events'))

            if not filenames:
                raise self.InvalidCmd('No event file corresponding to %s run with tag %s: %s' \
                                      % (self.run_name, prev_tag,
                                         pjoin(self.me_dir, 'Events', self.run_name, '%s_pythia_events.hep.gz' % prev_tag)))
            else:
                input_file = filenames[0]
                output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep')
                lock = cluster.asyncrone_launch('gunzip', stdout=open(output_file, 'w'),
                                                argument=['-c', input_file],
                                                close_fds=True)
        else:
            if tag:
                self.run_card['run_tag'] = tag
            self.set_run_name(self.run_name, tag, 'delphes')

    def check_calculate_xsect(self, args, options):
        """check the validity of the line. args is ORDER,
        ORDER being LO or NLO. If no mode is passed, NLO is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('NLO')
            return

        if len(args) > 1:
            self.help_calculate_xsect()
            raise self.InvalidCmd, 'Invalid Syntax: too many arguments'

        elif len(args) == 1:
            if not args[0] in ['NLO', 'LO']:
                raise self.InvalidCmd, '%s is not a valid mode, please use "LO" or "NLO"' % args[0]
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd, 'options -m (--multicore) and -c (--cluster)' + \
                                   ' are not compatible. Please choose one.'

    def check_generate_events(self, args, options):
        """check the validity of the line. args is ORDER,
        ORDER being LO or NLO. If no mode is passed, NLO is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if not args:
            args.append('NLO')
            return

        if len(args) > 1:
            self.help_generate_events()
            raise self.InvalidCmd, 'Invalid Syntax: too many arguments'

        elif len(args) == 1:
            if not args[0] in ['NLO', 'LO']:
                raise self.InvalidCmd, '%s is not a valid mode, please use "LO" or "NLO"' % args[0]
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd, 'options -m (--multicore) and -c (--cluster)' + \
                                   ' are not compatible. Please choose one.'

    def check_banner_run(self, args):
        """check the validity of the line"""

        if len(args) == 0:
            self.help_banner_run()
            raise self.InvalidCmd('banner_run requires at least one argument.')

        tag = [a[6:] for a in args if a.startswith('--tag=')]

        if os.path.exists(args[0]):
            type = 'banner'
            format = self.detect_card_type(args[0])
            if format != 'banner':
                raise self.InvalidCmd('The file is not a valid banner.')
        elif tag:
            args[0] = pjoin(self.me_dir, 'Events', args[0], '%s_%s_banner.txt' % \
                            (args[0], tag))
            if not os.path.exists(args[0]):
                raise self.InvalidCmd('No banner associated with this name and tag.')
        else:
            name = args[0]
            type = 'run'
            banners = misc.glob('*_banner.txt', pjoin(self.me_dir, 'Events', args[0]))
            if not banners:
                raise self.InvalidCmd('No banner associated with this name.')
            elif len(banners) == 1:
                args[0] = banners[0]
            else:
                # list the tags and propose them to the user
                tags = [os.path.basename(p)[len(args[0]) + 1:-11] for p in banners]
                tag = self.ask('which tag do you want to use?', tags[0], tags)
                args[0] = pjoin(self.me_dir, 'Events', args[0], '%s_%s_banner.txt' % \
                                (args[0], tag))

        run_name = [arg[7:] for arg in args if arg.startswith('--name=')]
        if run_name:
            try:
                self.exec_cmd('remove %s all banner -f' % run_name)
            except Exception:
                pass
            self.set_run_name(args[0], tag=None, level='parton', reload_card=True)
        elif type == 'banner':
            self.set_run_name(self.find_available_run_name(self.me_dir))
        elif type == 'run':
            if not self.results[name].is_empty():
                run_name = self.find_available_run_name(self.me_dir)
                logger.info('Run %s is not empty so will use run_name: %s' % \
                            (name, run_name))
                self.set_run_name(run_name)
            else:
                try:
                    self.exec_cmd('remove %s all banner -f' % run_name)
                except Exception:
                    pass
                self.set_run_name(name)

    def check_launch(self, args, options):
        """check the validity of the line. args is MODE
        MODE being LO, NLO, aMC@NLO or aMC@LO. If no mode is passed, auto is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('auto')
            return

        if len(args) > 1:
            self.help_launch()
            raise self.InvalidCmd, 'Invalid Syntax: too many arguments'

        elif len(args) == 1:
            if not args[0] in ['LO', 'NLO', 'aMC@NLO', 'aMC@LO', 'auto']:
                raise self.InvalidCmd, '%s is not a valid mode, please use "LO", "NLO", "aMC@NLO" or "aMC@LO"' % args[0]
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd, 'options -m (--multicore) and -c (--cluster)' + \
                                   ' are not compatible. Please choose one.'
        if mode == 'NLO' and options['reweightonly']:
            raise self.InvalidCmd, 'option -r (--reweightonly) needs mode "aMC@NLO" or "aMC@LO"'

    def check_compile(self, args, options):
        """check the validity of the line. args is MODE
        MODE being FO or MC. If no mode is passed, MC is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('MC')
            return

        if len(args) > 1:
            self.help_compile()
            raise self.InvalidCmd, 'Invalid Syntax: too many arguments'

        elif len(args) == 1:
            if not args[0] in ['MC', 'FO']:
                raise self.InvalidCmd, '%s is not a valid mode, please use "FO" or "MC"' % args[0]
            mode = args[0]
        # check for incompatible options/modes


#===============================================================================
# CompleteForCmd
#===============================================================================
class CompleteForCmd(CheckValidForCmd):
    """ The Series of completion routines for the aMCatNLOCmd"""

    def complete_launch(self, text, line, begidx, endidx):
        """auto-completion for launch command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return mode
            return self.list_completion(text, ['LO', 'NLO', 'aMC@NLO', 'aMC@LO'], line)
        elif len(args) == 2 and line[begidx - 1] == '@':
            return self.list_completion(text, ['LO', 'NLO'], line)
        else:
            opts = []
            for opt in _launch_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_banner_run(self, text, line, begidx, endidx, formatting=True):
        "Complete the banner run command"
        try:
            args = self.split_arg(line[0:begidx], error=False)

            if args[-1].endswith(os.path.sep):
                return self.path_completion(text,
                                            os.path.join('.', *[a for a in args \
                                                                if a.endswith(os.path.sep)]))

            if len(args) > 1:
                # only options are possible
                tags = misc.glob('%s_*_banner.txt' % args[1], pjoin(self.me_dir, 'Events', args[1]))
                tags = ['%s' % os.path.basename(t)[len(args[1]) + 1:-11] for t in tags]

                if args[-1] != '--tag=':
                    tags = ['--tag=%s' % t for t in tags]
                else:
                    return self.list_completion(text, tags)
                return self.list_completion(text, tags + ['--name=', '-f'], line)

            # First argument
            possibilites = {}

            comp = self.path_completion(text, os.path.join('.', *[a for a in args \
                                                                  if a.endswith(os.path.sep)]))
            if os.path.sep in line:
                return comp
            else:
                possibilites['Path from ./'] = comp

            run_list = misc.glob(pjoin('*', '*_banner.txt'), pjoin(self.me_dir, 'Events'))
            run_list = [n.rsplit('/', 2)[1] for n in run_list]
            possibilites['RUN Name'] = self.list_completion(text, run_list)

            return self.deal_multiple_categories(possibilites, formatting)

        except Exception, error:
            print error

    def complete_compile(self, text, line, begidx, endidx):
        """auto-completion for compile command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return mode
            return self.list_completion(text, ['FO', 'MC'], line)
        else:
            opts = []
            for opt in _compile_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_calculate_xsect(self, text, line, begidx, endidx):
        """auto-completion for calculate_xsect command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return mode
            return self.list_completion(text, ['LO', 'NLO'], line)
        else:
            opts = []
            for opt in _calculate_xsect_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_generate_events(self, text, line, begidx, endidx):
        """auto-completion for generate_events command;
        calls the completion for launch"""
        return self.complete_launch(text, line, begidx, endidx)

    def complete_shower(self, text, line, begidx, endidx):
        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return valid run_names
            data = misc.glob(pjoin('*', 'events.lhe.gz'), pjoin(self.me_dir, 'Events'))
            data = [n.rsplit('/', 2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1

    def complete_plot(self, text, line, begidx, endidx):
        """ Complete the plot command """

        args = self.split_arg(line[0:begidx], error=False)

        if len(args) == 1:
            # return valid run_names
            data = misc.glob(pjoin('*', 'events.lhe*'), pjoin(self.me_dir, 'Events'))
            data = [n.rsplit('/', 2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1

        if len(args) > 1:
            return self.list_completion(text, self._plot_mode)

    def complete_pgs(self, text, line, begidx, endidx):
        "Complete the pgs command"
        args = self.split_arg(line[0:begidx], error=False)
        if len(args) == 1:
            # return valid run_names
            data = misc.glob(pjoin('*', 'events_*.hep.gz'),
                             pjoin(self.me_dir, 'Events'))
            data = [n.rsplit('/', 2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1
            else:
                tmp2 = self.list_completion(text, self._run_options + ['-f',
                                            '--tag=', '--no_default'], line)
                return tmp1 + tmp2
        else:
            return self.list_completion(text, self._run_options + ['-f',
                                        '--tag=', '--no_default'], line)

    complete_delphes = complete_pgs


class aMCatNLOAlreadyRunning(InvalidCmd):
    pass

class AskRunNLO(cmd.ControlSwitch):

    to_control = [('order', 'Type of perturbative computation'),
                  ('fixed_order', 'No MC@[N]LO matching / event generation'),
                  ('shower', 'Shower the generated events'),
                  ('madspin', 'Decay onshell particles'),
                  ('reweight', 'Add weights to events for new hypotheses'),
                  ('madanalysis', 'Run MadAnalysis5 on the events generated')]

    quit_on = cmd.ControlSwitch.quit_on + ['onlyshower']

    def __init__(self, question, line_args=[], mode=None, force=False,
                 *args, **opt):

        self.check_available_module(opt['mother_interface'].options)
        self.me_dir = opt['mother_interface'].me_dir
        self.last_mode = opt['mother_interface'].last_mode
        self.proc_characteristics = opt['mother_interface'].proc_characteristics
        self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat'))

        hide_line = []
        if 'QED' in self.proc_characteristics['splitting_types']:
            hide_line = ['madspin', 'shower', 'reweight', 'madanalysis']

        super(AskRunNLO, self).__init__(self.to_control, opt['mother_interface'],
                                        *args, hide_line=hide_line, force=force,
                                        **opt)

    @property
    def answer(self):

        out = super(AskRunNLO, self).answer
        if out['shower'] == 'HERWIG7':
            out['shower'] = 'HERWIGPP'

        if out['shower'] not in self.get_allowed('shower') or out['shower'] == 'OFF':
            out['runshower'] = False
        else:
            out['runshower'] = True
        return out

    def default(self, *args, **opts):
        self.nb_fo_warning = 0
        super(AskRunNLO, self).default(*args, **opts)

    def check_available_module(self, options):

        self.available_module = set()
        if options['madanalysis5_path']:
            self.available_module.add('MA5')
        if not aMCatNLO or ('mg5_path' in options and options['mg5_path']):
            self.available_module.add('MadSpin')
            if misc.has_f2py() or options['f2py_compiler']:
                self.available_module.add('reweight')
        if options['pythia8_path']:
            self.available_module.add('PY8')
        if options['hwpp_path'] and options['thepeg_path'] and options['hepmc_path']:
            self.available_module.add('HW7')

    #
    # shortcuts
    #
    def ans_lo(self, value):
        """ function called if the user types lo=value, or just lo (then value is None)"""

        if value is None:
            self.switch['order'] = 'LO'
            self.switch['fixed_order'] = 'ON'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: lo=%s' % value)

    def ans_nlo(self, value):
        if value is None:
            self.switch['order'] = 'NLO'
            self.switch['fixed_order'] = 'ON'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: nlo=%s' % value)

    def ans_amc__at__nlo(self, value):
        if value is None:
            self.switch['order'] = 'NLO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'ON')
        else:
            logger.warning('Invalid command: aMC@NLO=%s' % value)

    def ans_amc__at__lo(self, value):
        if value is None:
            self.switch['order'] = 'LO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'ON')
        else:
            logger.warning('Invalid command: aMC@LO=%s' % value)

    def ans_noshower(self, value):
        if value is None:
            self.switch['order'] = 'NLO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: noshower=%s' % value)

    def ans_onlyshower(self, value):
        if value is None:
            self.switch['mode'] = 'onlyshower'
            self.switch['madspin'] = 'OFF'
            self.switch['reweight'] = 'OFF'
        else:
            logger.warning('Invalid command: onlyshower=%s' % value)

    def ans_noshowerlo(self, value):
        if value is None:
            self.switch['order'] = 'LO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: noshowerlo=%s' % value)

    def ans_madanalysis5(self, value):
        """ shortcut madanalysis5 -> madanalysis """

        if value is None:
            return self.onecmd('madanalysis')
        else:
            self.set_switch('madanalysis', value)

    #
    # ORDER
    #
    def get_allowed_order(self):
        return ["LO", "NLO"]

    def set_default_order(self):

        if self.last_mode in ['LO', 'aMC@LO', 'noshowerLO']:
            self.switch['order'] = 'LO'
        else:
            self.switch['order'] = 'NLO'

    def set_switch_off_order(self):
        return

    #
    # Fixed order
    #
    def get_allowed_fixed_order(self):
        """ """

        if self.proc_characteristics['ninitial'] == 1 or \
           'QED' in self.proc_characteristics['splitting_types']:
            return ['ON']
        else:
            return ['ON', 'OFF']

    def set_default_fixed_order(self):

        if self.last_mode in ['LO', 'NLO']:
            self.switch['fixed_order'] = 'ON'
        elif self.proc_characteristics['ninitial'] == 1:
            self.switch['fixed_order'] = 'ON'
        elif 'QED' in self.proc_characteristics['splitting_types']:
            self.switch['fixed_order'] = 'ON'
        else:
            self.switch['fixed_order'] = 'OFF'

    def color_for_fixed_order(self, switch_value):

        if switch_value in ['OFF']:
            return self.green % switch_value
        else:
            return self.red % switch_value

    def print_options_fixed_order(self):

        if 'QED' in self.proc_characteristics['splitting_types']:
            return "No NLO+PS available for EW corrections"
        else:
            return self.print_options('fixed_order', keep_default=True)

    def color_for_shower(self, switch_value):

        if switch_value in ['ON']:
            return self.green % switch_value
        elif switch_value in self.get_allowed('shower'):
            return self.green % switch_value
        else:
            return self.red % switch_value

    def consistency_QED(self, key, value, switch):
        """ temporary way to forbid event generation due to lack of validation"""

        if 'QED' in self.proc_characteristics['splitting_types']:
            out = {}
            to_check = {'fixed_order': ['ON'],
                        'shower': ['OFF'],
                        'madanalysis': ['OFF'],
                        'madspin': ['OFF', 'onshell', 'none'],
                        'reweight': ['OFF']}
            for one_key, allowed in to_check.items():
                if switch[one_key] not in allowed:
                    out[one_key] = allowed[0]
            if not self.nb_fo_warning:
                logger.warning("NLO+PS mode is not allowed for processes including electroweak corrections")
                self.nb_fo_warning = 1
            return out
        else:
            return self.check_consistency_with_all(key, value)

    # apply the QED consistency check to every switch in the group
    consistency_fixed_order = lambda self, *args, **opts: self.consistency_QED('fixed_order', *args, **opts)
    consistency_shower = lambda self, *args, **opts: self.consistency_QED('shower', *args, **opts)
    consistency_madanalysis = lambda self, *args, **opts: self.consistency_QED('madanalysis', *args, **opts)
    consistency_madspin = lambda self, *args, **opts: self.consistency_QED('madspin', *args, **opts)
    consistency_reweight = lambda self, *args, **opts: self.consistency_QED('reweight', *args, **opts)

    def consistency_fixed_order_shower(self, vfix, vshower):
        """ consistency_XX_YY(val_XX, val_YY)
        -> XX is the new key set by the user to a new value val_XX
        -> YY is another key set by the user.
        -> return value should be None or the replacement value for YY
        """

        if vfix == 'ON' and vshower != 'OFF':
            return 'OFF'
        return None

    consistency_fixed_order_madspin = consistency_fixed_order_shower
    consistency_fixed_order_reweight = consistency_fixed_order_shower

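    # Sketch of the consistency hook protocol documented above (hypothetical
    # rule, not part of the interface): to force madspin OFF whenever the
    # user switches madanalysis ON, one would add
    #
    #     def consistency_madanalysis_madspin(self, vma5, vmadspin):
    #         if vma5 == 'ON' and vmadspin == 'ON':
    #             return 'OFF'   # replacement value for the other key
    #         return None        # None means the current pair is consistent
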
    def consistency_fixed_order_madanalysis(self, vfix, vma5):

        if vfix == 'ON' and vma5 == 'ON':
            return 'OFF'
        return None

    def consistency_shower_fixed_order(self, vshower, vfix):
        """ consistency_XX_YY(val_XX, val_YY)
        -> XX is the new key set by the user to a new value val_XX
        -> YY is another key set by the user.
        -> return value should be None or the replacement value for YY
        """

        if vshower != 'OFF' and vfix == 'ON':
            return 'OFF'
        return None

    consistency_madspin_fixed_order = consistency_shower_fixed_order
    consistency_reweight_fixed_order = consistency_shower_fixed_order
    consistency_madanalysis_fixed_order = consistency_shower_fixed_order

    #
    # Shower
    #
    def get_allowed_shower(self):
        """ """

        if hasattr(self, 'allowed_shower'):
            return self.allowed_shower

        if 'QED' in self.proc_characteristics['splitting_types']:
            self.allowed_shower = ['OFF']
            return self.allowed_shower

        if self.proc_characteristics['ninitial'] == 1:
            self.allowed_shower = ['OFF']
            return ['OFF']
        else:
            allowed = ['HERWIG6', 'OFF', 'PYTHIA6Q', 'PYTHIA6PT']
            if 'PY8' in self.available_module:
                allowed.append('PYTHIA8')
            if 'HW7' in self.available_module:
                allowed.append('HERWIGPP')

            self.allowed_shower = allowed

            return allowed

    def check_value_shower(self, value):
        """ """

        if value.upper() in self.get_allowed_shower():
            return True
        if value.upper() in ['PYTHIA8', 'HERWIGPP']:
            return True
        if value.upper() == 'ON':
            return self.run_card['parton_shower']
        if value.upper() in ['P8', 'PY8', 'PYTHIA_8']:
            return 'PYTHIA8'
        if value.upper() in ['PY6', 'P6', 'PY6PT', 'PYTHIA_6', 'PYTHIA_6PT', 'PYTHIA6PT', 'PYTHIA6_PT']:
            return 'PYTHIA6PT'
        if value.upper() in ['PY6Q', 'PYTHIA_6Q', 'PYTHIA6Q', 'PYTHIA6_Q']:
            return 'PYTHIA6Q'
        if value.upper() in ['HW7', 'HERWIG7']:
            return 'HERWIG7'
        if value.upper() in ['HW++', 'HWPP', 'HERWIG++']:
            return 'HERWIGPP'
        if value.upper() in ['HW6', 'HERWIG_6']:
            return 'HERWIG6'

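    # Examples of the alias resolution above (illustrative):
    #     check_value_shower('py8')   -> 'PYTHIA8'
    #     check_value_shower('HW++')  -> 'HERWIGPP'
    #     check_value_shower('on')    -> value of run_card['parton_shower']
    # Unrecognised values fall through and return None, i.e. invalid.
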
    def set_default_shower(self):

        if 'QED' in self.proc_characteristics['splitting_types']:
            self.switch['shower'] = 'Not Avail'
        elif self.last_mode in ['LO', 'NLO', 'noshower', 'noshowerLO']:
            self.switch['shower'] = 'OFF'
        elif self.proc_characteristics['ninitial'] == 1:
            self.switch['shower'] = 'OFF'
        elif os.path.exists(pjoin(self.me_dir, 'Cards', 'shower_card.dat')):
            if self.switch['fixed_order'] == "OFF":
                self.switch['shower'] = self.run_card['parton_shower']
            elif self.switch['fixed_order'] == "ON":
                self.switch['shower'] = "OFF"
        else:
            if self.switch['fixed_order'] == "ON":
                self.switch['shower'] = 'OFF'
            else:
                self.switch['shower'] = 'OFF (%s)' % self.run_card['parton_shower']

    def consistency_shower_madanalysis(self, vshower, vma5):
        """ MA5 only possible with (N)LO+PS if the shower is run"""

        if vshower == 'OFF' and vma5 == 'ON':
            return 'OFF'
        return None

    def consistency_madanalysis_shower(self, vma5, vshower):

        if vma5 == 'ON' and vshower == 'OFF':
            return 'ON'
        return None

    def get_cardcmd_for_shower(self, value):
        """ adapt the run_card according to this setup. return the list of cmd to run"""

        if value != 'OFF':
            return ['set parton_shower %s' % self.switch['shower']]
        return []

    #
    # madspin
    #
    def get_allowed_madspin(self):
        """ """

        if hasattr(self, 'allowed_madspin'):
            return self.allowed_madspin

        self.allowed_madspin = []

        if 'MadSpin' not in self.available_module:
            return self.allowed_madspin
        if self.proc_characteristics['ninitial'] == 1:
            self.available_module.remove('MadSpin')
            self.allowed_madspin = ['OFF']
            return self.allowed_madspin
        else:
            if 'QED' in self.proc_characteristics['splitting_types']:
                self.allowed_madspin = ['OFF', 'onshell']
            else:
                self.allowed_madspin = ['OFF', 'ON', 'onshell']
            return self.allowed_madspin

    def check_value_madspin(self, value):
        """handle aliases and valid options not present in get_allowed_madspin.
        remember that this mode should always be OFF for 1>N ('ON' not in allowed values)"""

        if value.upper() in self.get_allowed_madspin():
            if value == value.upper():
                return True
            else:
                return value.upper()
        elif value.lower() in self.get_allowed_madspin():
            if value == value.lower():
                return True
            else:
                return value.lower()

        if 'MadSpin' not in self.available_module or \
           'ON' not in self.get_allowed_madspin():
            return False

        if value.lower() in ['madspin', 'full']:
            return 'full'
        elif value.lower() in ['none']:
            return 'none'

    def set_default_madspin(self):

        if 'MadSpin' in self.available_module:
            if os.path.exists(pjoin(self.me_dir, 'Cards', 'madspin_card.dat')):
                self.switch['madspin'] = 'ON'
            else:
                self.switch['madspin'] = 'OFF'
        else:
            self.switch['madspin'] = 'Not Avail'

    def get_cardcmd_for_madspin(self, value):
        """set some commands to run before allowing the user to modify the cards."""

        if value == 'onshell':
            return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode onshell"]
        elif value in ['full', 'madspin']:
            return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode madspin"]
        elif value == 'none':
            return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode none"]
        else:
            return []

    #
    # reweight
    #
    def get_allowed_reweight(self):
        """set the valid (visible) options for reweight"""

        if hasattr(self, 'allowed_reweight'):
            return self.allowed_reweight

        self.allowed_reweight = []
        if 'QED' in self.proc_characteristics['splitting_types']:
            return self.allowed_reweight
        if 'reweight' not in self.available_module:
            return self.allowed_reweight
        if self.proc_characteristics['ninitial'] == 1:
            self.available_module.remove('reweight')
            self.allowed_reweight.append('OFF')
            return self.allowed_reweight
        else:
            self.allowed_reweight = ['OFF', 'ON', 'NLO', 'NLO_TREE', 'LO']
            return self.allowed_reweight

    def set_default_reweight(self):
        """initialise the switch for reweight"""

        if 'QED' in self.proc_characteristics['splitting_types']:
            self.switch['reweight'] = 'Not Avail'
        elif 'reweight' in self.available_module:
            if os.path.exists(pjoin(self.me_dir, 'Cards', 'reweight_card.dat')):
                self.switch['reweight'] = 'ON'
            else:
                self.switch['reweight'] = 'OFF'
        else:
            self.switch['reweight'] = 'Not Avail'

    def get_cardcmd_for_reweight(self, value):
        """ adapt the run_card according to this setup. return the list of cmd to run"""

        if value == 'LO':
            return ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode LO"]
        elif value == 'NLO':
            return ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode NLO",
                    "set store_rwgt_info T"]
        elif value == 'NLO_TREE':
            return ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode NLO_tree",
                    "set store_rwgt_info T"]
        return []
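    # Example (follows directly from the code above):
    #     get_cardcmd_for_reweight('NLO')
    # returns
    #     ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode NLO",
    #      "set store_rwgt_info T"]
    # i.e. card-edit commands executed before the user may modify the cards.
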
    #
    # MadAnalysis5
    #
    def get_allowed_madanalysis(self):

        if hasattr(self, 'allowed_madanalysis'):
            return self.allowed_madanalysis

        self.allowed_madanalysis = []

        if 'QED' in self.proc_characteristics['splitting_types']:
            return self.allowed_madanalysis

        if 'MA5' not in self.available_module:
            return self.allowed_madanalysis

        if self.proc_characteristics['ninitial'] == 1:
            self.available_module.remove('MA5')
            self.allowed_madanalysis = ['OFF']
            return self.allowed_madanalysis
        else:
            self.allowed_madanalysis = ['OFF', 'ON']
            return self.allowed_madanalysis

    def set_default_madanalysis(self):
        """initialise the switch for madanalysis"""

        if 'QED' in self.proc_characteristics['splitting_types']:
            self.switch['madanalysis'] = 'Not Avail'
        elif 'MA5' not in self.available_module:
            self.switch['madanalysis'] = 'Not Avail'
        elif os.path.exists(pjoin(self.me_dir, 'Cards', 'madanalysis5_hadron_card.dat')):
            self.switch['madanalysis'] = 'ON'
        else:
            self.switch['madanalysis'] = 'OFF'

    def check_value_madanalysis(self, value):
        """check an entry is valid. return the valid entry in case of shortcut"""

        if value.upper() in self.get_allowed('madanalysis'):
            return True
        value = value.lower()
        if value == 'hadron':
            return 'ON' if 'ON' in self.get_allowed_madanalysis() else False
        else:
            return False


#===============================================================================
# aMCatNLOCmd
#===============================================================================
class aMCatNLOCmd(CmdExtended, HelpToCmd, CompleteForCmd, common_run.CommonRunCmd):
    """The command line processor of MadGraph"""

    # Truth values
    true = ['T', '.true.', True, 'true']
    # Options and formats available
    _run_options = ['--cluster', '--multicore', '--nb_core=', '--nb_core=2', '-c', '-m']
    _generate_options = ['-f', '--laststep=parton', '--laststep=pythia', '--laststep=pgs', '--laststep=delphes']
    _calculate_decay_options = ['-f', '--accuracy=0.']
    _set_options = ['stdout_level', 'fortran_compiler', 'cpp_compiler', 'timeout']
    _plot_mode = ['all', 'parton', 'shower', 'pgs', 'delphes']
    _clean_mode = _plot_mode + ['channel', 'banner']
    _display_opts = ['run_name', 'options', 'variable']
    # survey options, dict from name to type, default value, and help text
    # Variables to store object information
    web = False
    cluster_mode = 0
    queue = 'madgraph'
    nb_core = None
    make_opts_var = {}

    next_possibility = {
        'start': ['generate_events [OPTIONS]', 'calculate_crossx [OPTIONS]', 'launch [OPTIONS]',
                  'help generate_events'],
        'generate_events': ['generate_events [OPTIONS]', 'shower'],
        'launch': ['launch [OPTIONS]', 'shower'],
        'shower': ['generate_events [OPTIONS]']
    }

    ############################################################################
    def __init__(self, me_dir=None, options={}, *completekey, **stdin):
        """ add information to the cmd """

        self.start_time = 0
        CmdExtended.__init__(self, me_dir, options, *completekey, **stdin)
        #common_run.CommonRunCmd.__init__(self, me_dir, options)

        self.mode = 'aMCatNLO'
        self.nb_core = 0
        self.prompt = "%s>" % os.path.basename(pjoin(self.me_dir))

        self.load_results_db()
        self.results.def_web_mode(self.web)
        # check that the compiler is gfortran 4.6 or later if virtuals have been exported
        proc_card = open(pjoin(self.me_dir, 'Cards', 'proc_card_mg5.dat')).read()

        if not '[real=QCD]' in proc_card:
            check_compiler(self.options, block=True)

    ############################################################################
    def do_shower(self, line):
        """ run the shower on a given parton level file """
        argss = self.split_arg(line)
        (options, argss) = _launch_parser.parse_args(argss)
        # check argument validity and normalise arguments
        options = options.__dict__
        options['reweightonly'] = False
        self.check_shower(argss, options)
        evt_file = pjoin(os.getcwd(), argss[0], 'events.lhe')
        self.ask_run_configuration('onlyshower', options)
        self.run_mcatnlo(evt_file, options)

        self.update_status('', level='all', update_results=True)

    ################################################################################
    def do_plot(self, line):
        """Create the plots for a given run"""

        # Since in principle, all plots are already done automatically
        args = self.split_arg(line)
        # Check argument validity
        self.check_plot(args)
        logger.info('plot for run %s' % self.run_name)

        if not self.force:
            self.ask_edit_cards([], args, plot=True)

        if any([arg in ['parton'] for arg in args]):
            filename = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')
            if os.path.exists(filename + '.gz'):
                misc.gunzip(filename)
            if os.path.exists(filename):
                logger.info('Found events.lhe file for run %s' % self.run_name)
                shutil.move(filename, pjoin(self.me_dir, 'Events', 'unweighted_events.lhe'))
                self.create_plot('parton')
                shutil.move(pjoin(self.me_dir, 'Events', 'unweighted_events.lhe'), filename)
                misc.gzip(filename)

        if any([arg in ['all', 'parton'] for arg in args]):
            filename = pjoin(self.me_dir, 'Events', self.run_name, 'MADatNLO.top')
            if os.path.exists(filename):
                logger.info('Found MADatNLO.top file for run %s' % \
                            self.run_name)
                output = pjoin(self.me_dir, 'HTML', self.run_name, 'plots_parton.html')
                plot_dir = pjoin(self.me_dir, 'HTML', self.run_name, 'plots_parton')

                if not os.path.isdir(plot_dir):
                    os.makedirs(plot_dir)
                top_file = pjoin(plot_dir, 'plots.top')
                files.cp(filename, top_file)
                madir = self.options['madanalysis_path']
                tag = self.run_card['run_tag']
                td = self.options['td_path']
                misc.call(['%s/plot' % self.dirbin, madir, td],
                          stdout=open(pjoin(plot_dir, 'plot.log'), 'a'),
                          stderr=subprocess.STDOUT,
                          cwd=plot_dir)

                misc.call(['%s/plot_page-pl' % self.dirbin,
                           os.path.basename(plot_dir),
                           'parton'],
                          stdout=open(pjoin(plot_dir, 'plot.log'), 'a'),
                          stderr=subprocess.STDOUT,
                          cwd=pjoin(self.me_dir, 'HTML', self.run_name))
                shutil.move(pjoin(self.me_dir, 'HTML', self.run_name, 'plots.html'),
                            output)

                os.remove(pjoin(self.me_dir, 'Events', 'plots.top'))

        if any([arg in ['all', 'shower'] for arg in args]):
            filenames = misc.glob('events_*.lhe.gz', pjoin(self.me_dir, 'Events', self.run_name))
            if len(filenames) != 1:
                filenames = misc.glob('events_*.hep.gz', pjoin(self.me_dir, 'Events', self.run_name))
                if len(filenames) != 1:
                    logger.info('No shower-level event file found for run %s' % \
                                self.run_name)
                    return
                filename = filenames[0]
                misc.gunzip(filename, keep=True, stdout=pjoin(self.me_dir, 'Events', 'pythia_events.hep'))

                if not os.path.exists(pjoin(self.me_dir, 'Cards', 'pythia_card.dat')):
                    if aMCatNLO and not self.options['mg5_path']:
                        raise aMCatNLOError('plotting NLO HEP files requires MG5 utilities.')

                    files.cp(pjoin(self.options['mg5_path'], 'Template', 'LO', 'Cards', 'pythia_card_default.dat'),
                             pjoin(self.me_dir, 'Cards', 'pythia_card.dat'))
                self.run_hep2lhe()
            else:
                filename = filenames[0]
                misc.gunzip(filename, keep=True, stdout=pjoin(self.me_dir, 'Events', 'pythia_events.hep'))

            self.create_plot('shower')
            lhe_file_name = filename.replace('.hep.gz', '.lhe')
            shutil.move(pjoin(self.me_dir, 'Events', 'pythia_events.lhe'),
                        lhe_file_name)
            misc.gzip(lhe_file_name)

        if any([arg in ['all', 'pgs'] for arg in args]):
            filename = pjoin(self.me_dir, 'Events', self.run_name,
                             '%s_pgs_events.lhco' % self.run_tag)
            if os.path.exists(filename + '.gz'):
                misc.gunzip(filename)
            if os.path.exists(filename):
                self.create_plot('PGS')
                misc.gzip(filename)
            else:
                logger.info('No valid files to make PGS plots')

        if any([arg in ['all', 'delphes'] for arg in args]):
            filename = pjoin(self.me_dir, 'Events', self.run_name,
                             '%s_delphes_events.lhco' % self.run_tag)
            if os.path.exists(filename + '.gz'):
                misc.gunzip(filename)
            if os.path.exists(filename):
                #shutil.move(filename, pjoin(self.me_dir, 'Events','delphes_events.lhco'))
                self.create_plot('Delphes')
                #shutil.move(pjoin(self.me_dir, 'Events','delphes_events.lhco'), filename)
                misc.gzip(filename)
            else:
                logger.info('No valid files to make Delphes plots')

    ############################################################################
    def do_calculate_xsect(self, line):
        """Main commands: calculates LO/NLO cross-section, using madevent_mintFO
        this function wraps the do_launch one"""

        self.start_time = time.time()
        argss = self.split_arg(line)
        # check argument validity and normalise arguments
        (options, argss) = _calculate_xsect_parser.parse_args(argss)
        options = options.__dict__
        options['reweightonly'] = False
        options['parton'] = True
        self.check_calculate_xsect(argss, options)
        self.do_launch(line, options, argss)

    ############################################################################
    def do_banner_run(self, line):
        """Make a run from the banner file"""

        args = self.split_arg(line)
        # check the validity of the arguments
        self.check_banner_run(args)

        # Remove previous cards
        for name in ['shower_card.dat', 'madspin_card.dat']:
            try:
                os.remove(pjoin(self.me_dir, 'Cards', name))
            except Exception:
                pass

        banner_mod.split_banner(args[0], self.me_dir, proc_card=False)

        # Check if we want to modify the run
        if not self.force:
            ans = self.ask('Do you want to modify the Run type and/or any of the Cards?', 'n', ['y', 'n'])
            if ans == 'n':
                self.force = True

        # Compute the run mode
        if self.force:
            mode_status = {'order': 'NLO', 'fixed_order': False, 'madspin': False, 'shower': True}
            banner = banner_mod.Banner(args[0])
            for line in banner['run_settings']:
                if '=' in line:
                    mode, value = [t.strip() for t in line.split('=')]
                    mode_status[mode] = value
        else:
            mode_status = {}

        # Call generate_events
        self.do_launch('-n %s %s' % (self.run_name, '-f' if self.force else ''),
                       switch=mode_status)

    ############################################################################
1653 - def do_generate_events(self, line):
1654 """Main commands: generate events 1655 this function just wraps the do_launch one""" 1656 self.do_launch(line)
1657 1658 1659 ############################################################################
1660 - def do_treatcards(self, line, amcatnlo=True,mode=''):
1661 """Advanced commands: this is for creating the correct run_card.inc from the nlo format""" 1662 #check if no 'Auto' are present in the file 1663 self.check_param_card(pjoin(self.me_dir, 'Cards','param_card.dat')) 1664 1665 # propagate the FO_card entry FO_LHE_weight_ratio to the run_card. 1666 # this variable is system only in the run_card 1667 # can not be done in EditCard since this parameter is not written in the 1668 # run_card directly. 1669 if mode in ['LO', 'NLO']: 1670 name = 'fo_lhe_weight_ratio' 1671 FO_card = analyse_card.FOAnalyseCard(pjoin(self.me_dir,'Cards', 'FO_analyse_card.dat')) 1672 if name in FO_card: 1673 self.run_card.set(name, FO_card[name], user=False) 1674 name = 'fo_lhe_postprocessing' 1675 if name in FO_card: 1676 self.run_card.set(name, FO_card[name], user=False) 1677 1678 return super(aMCatNLOCmd,self).do_treatcards(line, amcatnlo)
1679 1680 ############################################################################
1681 - def set_configuration(self, amcatnlo=True, **opt):
1682 """assign all configuration variable from file 1683 loop over the different config file if config_file not define """ 1684 return super(aMCatNLOCmd,self).set_configuration(amcatnlo=amcatnlo, **opt)
1685 1686 ############################################################################
1687 - def do_launch(self, line, options={}, argss=[], switch={}):
1688 """Main commands: launch the full chain 1689 options and args are relevant if the function is called from other 1690 functions, such as generate_events or calculate_xsect 1691 mode gives the list of switch needed for the computation (usefull for banner_run) 1692 """ 1693 1694 if not argss and not options: 1695 self.start_time = time.time() 1696 argss = self.split_arg(line) 1697 # check argument validity and normalise argument 1698 (options, argss) = _launch_parser.parse_args(argss) 1699 options = options.__dict__ 1700 self.check_launch(argss, options) 1701 1702 1703 if 'run_name' in options.keys() and options['run_name']: 1704 self.run_name = options['run_name'] 1705 # if a dir with the given run_name already exists 1706 # remove it and warn the user 1707 if os.path.isdir(pjoin(self.me_dir, 'Events', self.run_name)): 1708 logger.warning('Removing old run information in \n'+ 1709 pjoin(self.me_dir, 'Events', self.run_name)) 1710 files.rm(pjoin(self.me_dir, 'Events', self.run_name)) 1711 self.results.delete_run(self.run_name) 1712 else: 1713 self.run_name = '' # will be set later 1714 1715 if options['multicore']: 1716 self.cluster_mode = 2 1717 elif options['cluster']: 1718 self.cluster_mode = 1 1719 1720 if not switch: 1721 mode = argss[0] 1722 1723 if mode in ['LO', 'NLO']: 1724 options['parton'] = True 1725 mode = self.ask_run_configuration(mode, options) 1726 else: 1727 mode = self.ask_run_configuration('auto', options, switch) 1728 1729 self.results.add_detail('run_mode', mode) 1730 1731 self.update_status('Starting run', level=None, update_results=True) 1732 1733 if self.options['automatic_html_opening']: 1734 misc.open_file(os.path.join(self.me_dir, 'crossx.html')) 1735 self.options['automatic_html_opening'] = False 1736 1737 if '+' in mode: 1738 mode = mode.split('+')[0] 1739 self.compile(mode, options) 1740 evt_file = self.run(mode, options) 1741 1742 if self.run_card['nevents'] == 0 and not mode in ['LO', 'NLO']: 1743 logger.info('No event file generated: grids have been set-up with a '\ 1744 'relative precision of %s' % self.run_card['req_acc']) 1745 return 1746 1747 if not mode in ['LO', 'NLO']: 1748 assert evt_file == pjoin(self.me_dir,'Events', self.run_name, 'events.lhe'), '%s != %s' %(evt_file, pjoin(self.me_dir,'Events', self.run_name, 'events.lhe.gz')) 1749 1750 if self.run_card['systematics_program'] == 'systematics': 1751 self.exec_cmd('systematics %s %s ' % (self.run_name, ' '.join(self.run_card['systematics_arguments']))) 1752 1753 self.exec_cmd('reweight -from_cards', postcmd=False) 1754 self.exec_cmd('decay_events -from_cards', postcmd=False) 1755 evt_file = pjoin(self.me_dir,'Events', self.run_name, 'events.lhe') 1756 1757 if not mode in ['LO', 'NLO', 'noshower', 'noshowerLO'] \ 1758 and not options['parton']: 1759 self.run_mcatnlo(evt_file, options) 1760 self.exec_cmd('madanalysis5_hadron --no_default', postcmd=False, printcmd=False) 1761 1762 elif mode == 'noshower': 1763 logger.warning("""You have chosen not to run a parton shower. NLO events without showering are NOT physical. Please, shower the Les Houches events before using them for physics analyses.""") 1764 1765 1766 self.update_status('', level='all', update_results=True) 1767 if self.run_card['ickkw'] == 3 and \ 1768 (mode in ['noshower'] or \ 1769 (('PYTHIA8' not in self.run_card['parton_shower'].upper()) and (mode in ['aMC@NLO']))): 1770 logger.warning("""You are running with FxFx merging enabled. 
To be able to merge samples of various multiplicities without double counting, you have to remove some events after showering 'by hand'. Please read http://amcatnlo.cern.ch/FxFx_merging.htm for more details.""")
1771
1772        self.store_result()
1773        # check if the param_card defines a scan.
1774        if self.param_card_iterator:
1775            cpath = pjoin(self.me_dir,'Cards','param_card.dat')
1776            param_card_iterator = self.param_card_iterator
1777            self.param_card_iterator = [] # avoid that the next generate goes through here
1778            param_card_iterator.store_entry(self.run_name, self.results.current['cross'],
1779                                            error=self.results.current['error'],
1780                                            param_card_path=cpath)
1781            orig_name = self.run_name
1782            # go through the scan
1783            with misc.TMP_variable(self, 'allow_notification_center', False):
1784                for i,card in enumerate(param_card_iterator):
1785                    card.write(cpath)
1786                    self.check_param_card(cpath, dependent=True)
1787                    if not options['force']:
1788                        options['force'] = True
1789                    if options['run_name']:
1790                        options['run_name'] = '%s_%s' % (orig_name, i+1)
1791                    if not argss:
1792                        argss = [mode, "-f"]
1793                    elif argss[0] == "auto":
1794                        argss[0] = mode
1795                    self.do_launch("", options=options, argss=argss, switch=switch)
1796                    #self.exec_cmd("launch -f ",precmd=True, postcmd=True,errorhandling=False)
1797                    param_card_iterator.store_entry(self.run_name, self.results.current['cross'],
1798                                                    error=self.results.current['error'],
1799                                                    param_card_path=cpath)
1800            # restore the original param_card
1801            param_card_iterator.write(pjoin(self.me_dir,'Cards','param_card.dat'))
1802            name = misc.get_scan_name(orig_name, self.run_name)
1803            path = pjoin(self.me_dir, 'Events','scan_%s.txt' % name)
1804            logger.info("write all cross-section results in %s" % path, '$MG:BOLD')
1805            param_card_iterator.write_summary(path)
1806
1807        if self.allow_notification_center:
1808            misc.apple_notify('Run %s finished' % os.path.basename(self.me_dir),
1809                              '%s: %s +- %s ' % (self.results.current['run_name'],
1810                                                 self.results.current['cross'],
1811                                                 self.results.current['error']))
1812 1813 1814 ############################################################################
1815 - def do_compile(self, line):
1816 """Advanced commands: just compile the executables """ 1817 argss = self.split_arg(line) 1818 # check argument validity and normalise argument 1819 (options, argss) = _compile_parser.parse_args(argss) 1820 options = options.__dict__ 1821 options['reweightonly'] = False 1822 options['nocompile'] = False 1823 self.check_compile(argss, options) 1824 1825 mode = {'FO': 'NLO', 'MC': 'aMC@NLO'}[argss[0]] 1826 self.ask_run_configuration(mode, options) 1827 self.compile(mode, options) 1828 1829 1830 self.update_status('', level='all', update_results=True)
1831 1832
1833 - def update_random_seed(self):
1834 """Update random number seed with the value from the run_card. 1835 If this is 0, update the number according to a fresh one""" 1836 iseed = self.run_card['iseed'] 1837 if iseed == 0: 1838 randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit')) 1839 iseed = int(randinit.read()[2:]) + 1 1840 randinit.close() 1841 randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit'), 'w') 1842 randinit.write('r=%d' % iseed) 1843 randinit.close()
1844 1845
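        # ------------------------------------------------------------------
        # A minimal, standalone sketch of the 'randinit' convention used by
        # update_random_seed and get_randinit_seed above: the file holds a
        # single "r=%d" entry. The temporary directory stands in for the
        # real SubProcesses/ folder, and the seed value 33 is made up.
        #
        #     import os, tempfile
        #
        #     workdir = tempfile.mkdtemp()
        #     randinit_path = os.path.join(workdir, 'randinit')
        #     with open(randinit_path, 'w') as f:   # as written at run set-up
        #         f.write('r=33')
        #     with open(randinit_path) as f:        # read: strip the "r=" prefix
        #         iseed = int(f.read()[2:])
        #     with open(randinit_path, 'w') as f:   # write back an incremented seed
        #         f.write('r=%d' % (iseed + 1))
        # ------------------------------------------------------------------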
1846 - def run(self, mode, options):
1847 """runs aMC@NLO. Returns the name of the event file created""" 1848 logger.info('Starting run') 1849 1850 if not 'only_generation' in options.keys(): 1851 options['only_generation'] = False 1852 1853 # for second step in applgrid mode, do only the event generation step 1854 if mode in ['LO', 'NLO'] and self.run_card['iappl'] == 2 and not options['only_generation']: 1855 options['only_generation'] = True 1856 self.get_characteristics(pjoin(self.me_dir, 'SubProcesses', 'proc_characteristics')) 1857 self.setup_cluster_or_multicore() 1858 self.update_random_seed() 1859 #find and keep track of all the jobs 1860 folder_names = {'LO': ['born_G*'], 'NLO': ['all_G*'], 1861 'aMC@LO': ['GB*'], 'aMC@NLO': ['GF*']} 1862 folder_names['noshower'] = folder_names['aMC@NLO'] 1863 folder_names['noshowerLO'] = folder_names['aMC@LO'] 1864 p_dirs = [d for d in \ 1865 open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')).read().split('\n') if d] 1866 #Clean previous results 1867 self.clean_previous_results(options,p_dirs,folder_names[mode]) 1868 1869 mcatnlo_status = ['Setting up grids', 'Computing upper envelope', 'Generating events'] 1870 1871 1872 if options['reweightonly']: 1873 event_norm=self.run_card['event_norm'] 1874 nevents=self.run_card['nevents'] 1875 return self.reweight_and_collect_events(options, mode, nevents, event_norm) 1876 1877 if mode in ['LO', 'NLO']: 1878 # this is for fixed order runs 1879 mode_dict = {'NLO': 'all', 'LO': 'born'} 1880 logger.info('Doing fixed order %s' % mode) 1881 req_acc = self.run_card['req_acc_FO'] 1882 1883 # Re-distribute the grids for the 2nd step of the applgrid 1884 # running 1885 if self.run_card['iappl'] == 2: 1886 self.applgrid_distribute(options,mode_dict[mode],p_dirs) 1887 1888 # create a list of dictionaries "jobs_to_run" with all the 1889 # jobs that need to be run 1890 integration_step=-1 1891 jobs_to_run,jobs_to_collect,integration_step = self.create_jobs_to_run(options,p_dirs, \ 1892 req_acc,mode_dict[mode],integration_step,mode,fixed_order=True) 1893 self.prepare_directories(jobs_to_run,mode) 1894 1895 # loop over the integration steps. After every step, check 1896 # if we have the required accuracy. If this is the case, 1897 # stop running, else do another step. 1898 while True: 1899 integration_step=integration_step+1 1900 self.run_all_jobs(jobs_to_run,integration_step) 1901 self.collect_log_files(jobs_to_run,integration_step) 1902 jobs_to_run,jobs_to_collect=self.collect_the_results(options,req_acc,jobs_to_run, \ 1903 jobs_to_collect,integration_step,mode,mode_dict[mode]) 1904 if not jobs_to_run: 1905 # there are no more jobs to run (jobs_to_run is empty) 1906 break 1907 # We are done. 1908 self.finalise_run_FO(folder_names[mode],jobs_to_collect) 1909 self.update_status('Run complete', level='parton', update_results=True) 1910 return 1911 1912 elif mode in ['aMC@NLO','aMC@LO','noshower','noshowerLO']: 1913 if self.ninitial == 1: 1914 raise aMCatNLOError('Decay processes can only be run at fixed order.') 1915 mode_dict = {'aMC@NLO': 'all', 'aMC@LO': 'born',\ 1916 'noshower': 'all', 'noshowerLO': 'born'} 1917 shower = self.run_card['parton_shower'].upper() 1918 nevents = self.run_card['nevents'] 1919 req_acc = self.run_card['req_acc'] 1920 if nevents == 0 and req_acc < 0 : 1921 raise aMCatNLOError('Cannot determine the required accuracy from the number '\ 1922 'of events, because 0 events requested. 
Please set '\ 1923 'the "req_acc" parameter in the run_card to a value '\ 1924 'between 0 and 1') 1925 elif req_acc >1 or req_acc == 0 : 1926 raise aMCatNLOError('Required accuracy ("req_acc" in the run_card) should '\ 1927 'be between larger than 0 and smaller than 1, '\ 1928 'or set to -1 for automatic determination. Current '\ 1929 'value is %f' % req_acc) 1930 # For more than 1M events, set req_acc to 0.001 (except when it was explicitly set in the run_card) 1931 elif req_acc < 0 and nevents > 1000000 : 1932 req_acc=0.001 1933 1934 shower_list = ['HERWIG6', 'HERWIGPP', 'PYTHIA6Q', 'PYTHIA6PT', 'PYTHIA8'] 1935 1936 if not shower in shower_list: 1937 raise aMCatNLOError('%s is not a valid parton shower. '\ 1938 'Please use one of the following: %s' \ 1939 % (shower, ', '.join(shower_list))) 1940 1941 # check that PYTHIA6PT is not used for processes with FSR 1942 if shower == 'PYTHIA6PT' and self.proc_characteristics['has_fsr']: 1943 raise aMCatNLOError('PYTHIA6PT does not support processes with FSR') 1944 1945 if mode in ['aMC@NLO', 'aMC@LO']: 1946 logger.info('Doing %s matched to parton shower' % mode[4:]) 1947 elif mode in ['noshower','noshowerLO']: 1948 logger.info('Generating events without running the shower.') 1949 elif options['only_generation']: 1950 logger.info('Generating events starting from existing results') 1951 1952 jobs_to_run,jobs_to_collect,integration_step = self.create_jobs_to_run(options,p_dirs, \ 1953 req_acc,mode_dict[mode],1,mode,fixed_order=False) 1954 # Make sure to update all the jobs to be ready for the event generation step 1955 if options['only_generation']: 1956 jobs_to_run,jobs_to_collect=self.collect_the_results(options,req_acc,jobs_to_run, \ 1957 jobs_to_collect,1,mode,mode_dict[mode],fixed_order=False) 1958 else: 1959 self.prepare_directories(jobs_to_run,mode,fixed_order=False) 1960 1961 1962 # Main loop over the three MINT generation steps: 1963 for mint_step, status in enumerate(mcatnlo_status): 1964 if options['only_generation'] and mint_step < 2: 1965 continue 1966 self.update_status(status, level='parton') 1967 self.run_all_jobs(jobs_to_run,mint_step,fixed_order=False) 1968 self.collect_log_files(jobs_to_run,mint_step) 1969 jobs_to_run,jobs_to_collect=self.collect_the_results(options,req_acc,jobs_to_run, \ 1970 jobs_to_collect,mint_step,mode,mode_dict[mode],fixed_order=False) 1971 if mint_step+1==2 and nevents==0: 1972 self.print_summary(options,2,mode) 1973 return 1974 1975 # Sanity check on the event files. If error the jobs are resubmitted 1976 self.check_event_files(jobs_to_collect) 1977 1978 if self.cluster_mode == 1: 1979 #if cluster run, wait 10 sec so that event files are transferred back 1980 self.update_status( 1981 'Waiting while files are transferred back from the cluster nodes', 1982 level='parton') 1983 time.sleep(10) 1984 1985 event_norm=self.run_card['event_norm'] 1986 return self.reweight_and_collect_events(options, mode, nevents, event_norm)
1987
1988 - def create_jobs_to_run(self,options,p_dirs,req_acc,run_mode,\ 1989 integration_step,mode,fixed_order=True):
1990 """Creates a list of dictionaries with all the jobs to be run""" 1991 jobs_to_run=[] 1992 if not options['only_generation']: 1993 # Fresh, new run. Check all the P*/channels.txt files 1994 # (created by the 'gensym' executable) to set-up all the 1995 # jobs using the default inputs. 1996 npoints = self.run_card['npoints_FO_grid'] 1997 niters = self.run_card['niters_FO_grid'] 1998 for p_dir in p_dirs: 1999 try: 2000 with open(pjoin(self.me_dir,'SubProcesses',p_dir,'channels.txt')) as chan_file: 2001 channels=chan_file.readline().split() 2002 except IOError: 2003 logger.warning('No integration channels found for contribution %s' % p_dir) 2004 continue 2005 if fixed_order: 2006 lch=len(channels) 2007 maxchannels=20 # combine up to 20 channels in a single job 2008 if self.run_card['iappl'] != 0: maxchannels=1 2009 njobs=(int(lch/maxchannels)+1 if lch%maxchannels!= 0 \ 2010 else int(lch/maxchannels)) 2011 for nj in range(1,njobs+1): 2012 job={} 2013 job['p_dir']=p_dir 2014 job['channel']=str(nj) 2015 job['nchans']=(int(lch/njobs)+1 if nj <= lch%njobs else int(lch/njobs)) 2016 job['configs']=' '.join(channels[:job['nchans']]) 2017 del channels[:job['nchans']] 2018 job['split']=0 2019 if req_acc == -1: 2020 job['accuracy']=0 2021 job['niters']=niters 2022 job['npoints']=npoints 2023 elif req_acc > 0: 2024 job['accuracy']=0.05 2025 job['niters']=6 2026 job['npoints']=-1 2027 else: 2028 raise aMCatNLOError('No consistent "req_acc_FO" set. Use a value '+ 2029 'between 0 and 1 or set it equal to -1.') 2030 job['mint_mode']=0 2031 job['run_mode']=run_mode 2032 job['wgt_frac']=1.0 2033 job['wgt_mult']=1.0 2034 jobs_to_run.append(job) 2035 if channels: 2036 raise aMCatNLOError('"channels" is not empty %s' % channels) 2037 else: 2038 for channel in channels: 2039 job={} 2040 job['p_dir']=p_dir 2041 job['channel']=channel 2042 job['split']=0 2043 job['accuracy']=0.03 2044 job['niters']=12 2045 job['npoints']=-1 2046 job['mint_mode']=0 2047 job['run_mode']=run_mode 2048 job['wgt_frac']=1.0 2049 jobs_to_run.append(job) 2050 jobs_to_collect=copy.copy(jobs_to_run) # These are all jobs 2051 else: 2052 # if options['only_generation'] is true, just read the current jobs from file 2053 try: 2054 with open(pjoin(self.me_dir,"SubProcesses","job_status.pkl"),'rb') as f: 2055 jobs_to_collect=pickle.load(f) 2056 for job in jobs_to_collect: 2057 job['dirname']=pjoin(self.me_dir,'SubProcesses',job['dirname'].rsplit('/SubProcesses/',1)[1]) 2058 jobs_to_run=copy.copy(jobs_to_collect) 2059 except: 2060 raise aMCatNLOError('Cannot reconstruct jobs from saved job status in %s' % \ 2061 pjoin(self.me_dir,'SubProcesses','job_status.pkl')) 2062 # Update cross sections and determine which jobs to run next 2063 if fixed_order: 2064 jobs_to_run,jobs_to_collect=self.collect_the_results(options,req_acc,jobs_to_run, 2065 jobs_to_collect,integration_step,mode,run_mode) 2066 # Update the integration_step to make sure that nothing will be overwritten 2067 integration_step=1 2068 for job in jobs_to_run: 2069 while os.path.exists(pjoin(job['dirname'],'res_%s.dat' % integration_step)): 2070 integration_step=integration_step+1 2071 integration_step=integration_step-1 2072 else: 2073 self.append_the_results(jobs_to_collect,integration_step) 2074 return jobs_to_run,jobs_to_collect,integration_step
2075
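        # ------------------------------------------------------------------
        # A standalone sketch of how the fixed-order branch of
        # create_jobs_to_run above chunks a channel list into at most
        # 'maxchannels' channels per job, with the remainder spread over the
        # first jobs. The 7-channel list and maxchannels=3 are made-up
        # example values, not defaults of the code.
        #
        #     channels = ['1', '2', '3', '4', '5', '6', '7']
        #     maxchannels = 3
        #     lch = len(channels)
        #     njobs = (lch // maxchannels + 1) if lch % maxchannels else lch // maxchannels
        #     chunks = []
        #     for nj in range(1, njobs + 1):
        #         # earlier jobs absorb the remainder, one extra channel each
        #         nchans = (lch // njobs + 1) if nj <= lch % njobs else lch // njobs
        #         chunks.append(channels[:nchans])
        #         del channels[:nchans]
        #     assert not channels       # mirrors the consistency check above
        #     print(chunks)             # [['1','2','3'], ['4','5'], ['6','7']]
        # ------------------------------------------------------------------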
2076 - def prepare_directories(self,jobs_to_run,mode,fixed_order=True):
2077 """Set-up the G* directories for running""" 2078 name_suffix={'born' :'B' , 'all':'F'} 2079 for job in jobs_to_run: 2080 if job['split'] == 0: 2081 if fixed_order : 2082 dirname=pjoin(self.me_dir,'SubProcesses',job['p_dir'], 2083 job['run_mode']+'_G'+job['channel']) 2084 else: 2085 dirname=pjoin(self.me_dir,'SubProcesses',job['p_dir'], 2086 'G'+name_suffix[job['run_mode']]+job['channel']) 2087 else: 2088 if fixed_order : 2089 dirname=pjoin(self.me_dir,'SubProcesses',job['p_dir'], 2090 job['run_mode']+'_G'+job['channel']+'_'+str(job['split'])) 2091 else: 2092 dirname=pjoin(self.me_dir,'SubProcesses',job['p_dir'], 2093 'G'+name_suffix[job['run_mode']]+job['channel']+'_'+str(job['split'])) 2094 job['dirname']=dirname 2095 if not os.path.isdir(dirname): 2096 os.makedirs(dirname) 2097 self.write_input_file(job,fixed_order) 2098 # link or copy the grids from the base directory to the split directory: 2099 if not fixed_order: 2100 if job['split'] != 0: 2101 for f in ['grid.MC_integer','mint_grids','res_1']: 2102 if not os.path.isfile(pjoin(job['dirname'],f)): 2103 files.ln(pjoin(job['dirname'].rsplit("_",1)[0],f),job['dirname']) 2104 else: 2105 if job['split'] != 0: 2106 for f in ['grid.MC_integer','mint_grids']: 2107 files.cp(pjoin(job['dirname'].rsplit("_",1)[0],f),job['dirname'])
2108 2109
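        # ------------------------------------------------------------------
        # A sketch of the G* directory-naming convention that
        # prepare_directories builds: 'born'/'all' are the fixed-order run
        # modes, the 'B'/'F' suffix is used for (N)LO+PS runs, and a non-zero
        # split index is appended with '_'. The helper name g_dirname and the
        # sample channel '12' are hypothetical, for illustration only.
        #
        #     def g_dirname(run_mode, channel, split, fixed_order=True):
        #         name_suffix = {'born': 'B', 'all': 'F'}
        #         if fixed_order:
        #             base = run_mode + '_G' + channel               # e.g. all_G12
        #         else:
        #             base = 'G' + name_suffix[run_mode] + channel   # e.g. GF12
        #         return base if split == 0 else base + '_' + str(split)
        #
        #     print(g_dirname('all', '12', 0))          # all_G12
        #     print(g_dirname('all', '12', 3, False))   # GF12_3
        # ------------------------------------------------------------------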
2110 - def write_input_file(self,job,fixed_order):
2111 """write the input file for the madevent_mint* executable in the appropriate directory""" 2112 if fixed_order: 2113 content= \ 2114 """NPOINTS = %(npoints)s 2115 NITERATIONS = %(niters)s 2116 ACCURACY = %(accuracy)s 2117 ADAPT_GRID = 2 2118 MULTICHANNEL = 1 2119 SUM_HELICITY = 1 2120 NCHANS = %(nchans)s 2121 CHANNEL = %(configs)s 2122 SPLIT = %(split)s 2123 WGT_MULT= %(wgt_mult)s 2124 RUN_MODE = %(run_mode)s 2125 RESTART = %(mint_mode)s 2126 """ \ 2127 % job 2128 else: 2129 content = \ 2130 """-1 12 ! points, iterations 2131 %(accuracy)s ! desired fractional accuracy 2132 1 -0.1 ! alpha, beta for Gsoft 2133 -1 -0.1 ! alpha, beta for Gazi 2134 1 ! Suppress amplitude (0 no, 1 yes)? 2135 1 ! Exact helicity sum (0 yes, n = number/event)? 2136 %(channel)s ! Enter Configuration Number: 2137 %(mint_mode)s ! MINT imode: 0 to set-up grids, 1 to perform integral, 2 generate events 2138 1 1 1 ! if imode is 1: Folding parameters for xi_i, phi_i and y_ij 2139 %(run_mode)s ! all, born, real, virt 2140 """ \ 2141 % job 2142 with open(pjoin(job['dirname'], 'input_app.txt'), 'w') as input_file: 2143 input_file.write(content)
2144 2145
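        # ------------------------------------------------------------------
        # A sketch of the %-dictionary substitution used by write_input_file
        # above, rendered for a sample of the fields only. The keys come from
        # the job dictionaries built in create_jobs_to_run; the values here
        # are made up for illustration.
        #
        #     job = {'npoints': -1, 'niters': 6, 'accuracy': 0.05, 'mint_mode': 0}
        #     content = ("NPOINTS = %(npoints)s\n"
        #                "NITERATIONS = %(niters)s\n"
        #                "ACCURACY = %(accuracy)s\n"
        #                "RESTART = %(mint_mode)s\n") % job
        #     print(content)
        #     # NPOINTS = -1
        #     # NITERATIONS = 6
        #     # ACCURACY = 0.05
        #     # RESTART = 0
        # ------------------------------------------------------------------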
2146 - def run_all_jobs(self,jobs_to_run,integration_step,fixed_order=True):
2147 """Loops over the jobs_to_run and executes them using the function 'run_exe'""" 2148 if fixed_order: 2149 if integration_step == 0: 2150 self.update_status('Setting up grids', level=None) 2151 else: 2152 self.update_status('Refining results, step %i' % integration_step, level=None) 2153 self.ijob = 0 2154 name_suffix={'born' :'B', 'all':'F'} 2155 if fixed_order: 2156 run_type="Fixed order integration step %s" % integration_step 2157 else: 2158 run_type="MINT step %s" % integration_step 2159 self.njobs=len(jobs_to_run) 2160 for job in jobs_to_run: 2161 executable='ajob1' 2162 if fixed_order: 2163 arguments=[job['channel'],job['run_mode'], \ 2164 str(job['split']),str(integration_step)] 2165 else: 2166 arguments=[job['channel'],name_suffix[job['run_mode']], \ 2167 str(job['split']),str(integration_step)] 2168 self.run_exe(executable,arguments,run_type, 2169 cwd=pjoin(self.me_dir,'SubProcesses',job['p_dir'])) 2170 2171 if self.cluster_mode == 2: 2172 time.sleep(1) # security to allow all jobs to be launched 2173 self.wait_for_complete(run_type)
2174 2175
2176 - def collect_the_results(self,options,req_acc,jobs_to_run,jobs_to_collect,\ 2177 integration_step,mode,run_mode,fixed_order=True):
2178 """Collect the results, make HTML pages, print the summary and 2179 determine if there are more jobs to run. Returns the list 2180 of the jobs that still need to be run, as well as the 2181 complete list of jobs that need to be collected to get the 2182 final answer. 2183 """ 2184 # Get the results of the current integration/MINT step 2185 self.append_the_results(jobs_to_run,integration_step) 2186 self.cross_sect_dict = self.write_res_txt_file(jobs_to_collect,integration_step) 2187 # Update HTML pages 2188 if fixed_order: 2189 cross, error = self.make_make_all_html_results(folder_names=['%s*' % run_mode], 2190 jobs=jobs_to_collect) 2191 else: 2192 name_suffix={'born' :'B' , 'all':'F'} 2193 cross, error = self.make_make_all_html_results(folder_names=['G%s*' % name_suffix[run_mode]]) 2194 self.results.add_detail('cross', cross) 2195 self.results.add_detail('error', error) 2196 # Combine grids from split fixed order jobs 2197 if fixed_order: 2198 jobs_to_run=self.combine_split_order_run(jobs_to_run) 2199 # Set-up jobs for the next iteration/MINT step 2200 jobs_to_run_new=self.update_jobs_to_run(req_acc,integration_step,jobs_to_run,fixed_order) 2201 # IF THERE ARE NO MORE JOBS, WE ARE DONE!!! 2202 if fixed_order: 2203 # Write the jobs_to_collect directory to file so that we 2204 # can restart them later (with only-generation option) 2205 with open(pjoin(self.me_dir,"SubProcesses","job_status.pkl"),'wb') as f: 2206 pickle.dump(jobs_to_collect,f) 2207 # Print summary 2208 if (not jobs_to_run_new) and fixed_order: 2209 # print final summary of results (for fixed order) 2210 scale_pdf_info=self.collect_scale_pdf_info(options,jobs_to_collect) 2211 self.print_summary(options,integration_step,mode,scale_pdf_info,done=True) 2212 return jobs_to_run_new,jobs_to_collect 2213 elif jobs_to_run_new: 2214 # print intermediate summary of results 2215 scale_pdf_info=[] 2216 self.print_summary(options,integration_step,mode,scale_pdf_info,done=False) 2217 else: 2218 # When we are done for (N)LO+PS runs, do not print 2219 # anything yet. This will be done after the reweighting 2220 # and collection of the events 2221 scale_pdf_info=[] 2222 # Prepare for the next integration/MINT step 2223 if (not fixed_order) and integration_step+1 == 2 : 2224 # Write the jobs_to_collect directory to file so that we 2225 # can restart them later (with only-generation option) 2226 with open(pjoin(self.me_dir,"SubProcesses","job_status.pkl"),'wb') as f: 2227 pickle.dump(jobs_to_collect,f) 2228 # next step is event generation (mint_step 2) 2229 jobs_to_run_new,jobs_to_collect_new= \ 2230 self.check_the_need_to_split(jobs_to_run_new,jobs_to_collect) 2231 self.prepare_directories(jobs_to_run_new,mode,fixed_order) 2232 self.write_nevents_unweighted_file(jobs_to_collect_new,jobs_to_collect) 2233 self.write_nevts_files(jobs_to_run_new) 2234 else: 2235 if fixed_order and self.run_card['iappl'] == 0 \ 2236 and self.run_card['req_acc_FO'] > 0: 2237 jobs_to_run_new,jobs_to_collect= \ 2238 self.split_jobs_fixed_order(jobs_to_run_new,jobs_to_collect) 2239 self.prepare_directories(jobs_to_run_new,mode,fixed_order) 2240 jobs_to_collect_new=jobs_to_collect 2241 if fixed_order: 2242 # Write the jobs_to_collect directory to file so that we 2243 # can collect plots (by hand) even if there was some 2244 # error. Mainly for debugging only. Normally this file 2245 # should not be used. 
(Rather, use 'job_status.pkl' which 2246 # is only written if all jobs are finished correctly) 2247 with open(pjoin(self.me_dir,"SubProcesses","job_status2.pkl"),'wb') as f: 2248 pickle.dump(jobs_to_collect_new,f) 2249 return jobs_to_run_new,jobs_to_collect_new
2250 2251
2252 - def write_nevents_unweighted_file(self,jobs,jobs0events):
2253 """writes the nevents_unweighted file in the SubProcesses directory. 2254 We also need to write the jobs that will generate 0 events, 2255 because that makes sure that the cross section from those channels 2256 is taken into account in the event weights (by collect_events.f). 2257 """ 2258 content=[] 2259 for job in jobs: 2260 path=pjoin(job['dirname'].split('/')[-2],job['dirname'].split('/')[-1]) 2261 lhefile=pjoin(path,'events.lhe') 2262 content.append(' %s %d %9e %9e' % \ 2263 (lhefile.ljust(40),job['nevents'],job['resultABS']*job['wgt_frac'],job['wgt_frac'])) 2264 for job in jobs0events: 2265 if job['nevents']==0: 2266 path=pjoin(job['dirname'].split('/')[-2],job['dirname'].split('/')[-1]) 2267 lhefile=pjoin(path,'events.lhe') 2268 content.append(' %s %d %9e %9e' % \ 2269 (lhefile.ljust(40),job['nevents'],job['resultABS'],1.)) 2270 with open(pjoin(self.me_dir,'SubProcesses',"nevents_unweighted"),'w') as f: 2271 f.write('\n'.join(content)+'\n')
2272
2273 - def write_nevts_files(self,jobs):
2274 """write the nevts files in the SubProcesses/P*/G*/ directories""" 2275 for job in jobs: 2276 with open(pjoin(job['dirname'],'nevts'),'w') as f: 2277 if self.run_card['event_norm'].lower()=='bias': 2278 f.write('%i %f\n' % (job['nevents'],self.cross_sect_dict['xseca'])) 2279 else: 2280 f.write('%i\n' % job['nevents'])
2281
2282 - def combine_split_order_run(self,jobs_to_run):
2283 """Combines jobs and grids from split jobs that have been run""" 2284 # combine the jobs that need to be combined in job 2285 # groups. Simply combine the ones that have the same p_dir and 2286 # same channel. 2287 jobgroups_to_combine=[] 2288 jobs_to_run_new=[] 2289 for job in jobs_to_run: 2290 if job['split'] == 0: 2291 job['combined']=1 2292 jobs_to_run_new.append(job) # this jobs wasn't split 2293 elif job['split'] == 1: 2294 jobgroups_to_combine.append(filter(lambda j: j['p_dir'] == job['p_dir'] and \ 2295 j['channel'] == job['channel'], jobs_to_run)) 2296 else: 2297 continue 2298 for job_group in jobgroups_to_combine: 2299 # Combine the grids (mint-grids & MC-integer grids) first 2300 self.combine_split_order_grids(job_group) 2301 jobs_to_run_new.append(self.combine_split_order_jobs(job_group)) 2302 return jobs_to_run_new
2303
2304 - def combine_split_order_jobs(self,job_group):
2305 """combine the jobs in job_group and return a single summed job""" 2306 # first copy one of the jobs in 'jobs' 2307 sum_job=copy.copy(job_group[0]) 2308 # update the information to have a 'non-split' job: 2309 sum_job['dirname']=pjoin(sum_job['dirname'].rsplit('_',1)[0]) 2310 sum_job['split']=0 2311 sum_job['wgt_mult']=1.0 2312 sum_job['combined']=len(job_group) 2313 # information to be summed: 2314 keys=['niters_done','npoints_done','niters','npoints',\ 2315 'result','resultABS','time_spend'] 2316 keys2=['error','errorABS'] 2317 # information to be summed in quadrature: 2318 for key in keys2: 2319 sum_job[key]=math.pow(sum_job[key],2) 2320 # Loop over the jobs and sum the information 2321 for i,job in enumerate(job_group): 2322 if i==0 : continue # skip the first 2323 for key in keys: 2324 sum_job[key]+=job[key] 2325 for key in keys2: 2326 sum_job[key]+=math.pow(job[key],2) 2327 for key in keys2: 2328 sum_job[key]=math.sqrt(sum_job[key]) 2329 sum_job['err_percABS'] = sum_job['errorABS']/sum_job['resultABS']*100. 2330 sum_job['err_perc'] = sum_job['error']/sum_job['result']*100. 2331 sum_job['niters']=int(sum_job['niters_done']/len(job_group)) 2332 sum_job['niters_done']=int(sum_job['niters_done']/len(job_group)) 2333 return sum_job
2334 2335
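        # ------------------------------------------------------------------
        # combine_split_order_jobs above adds central values linearly and
        # uncertainties in quadrature. A standalone numeric check of that
        # combination rule, with made-up (cross section, absolute error)
        # pairs:
        #
        #     import math
        #     results = [(1.50, 0.05), (1.46, 0.07), (1.52, 0.04)]
        #     total = sum(r for r, _ in results)
        #     error = math.sqrt(sum(e ** 2 for _, e in results))
        #     print('%f +- %f' % (total, error))   # 4.480000 +- 0.094868
        # ------------------------------------------------------------------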
2336 - def combine_split_order_grids(self,job_group):
2337 """Combines the mint_grids and MC-integer grids from the split order 2338 jobs (fixed order only). 2339 """ 2340 files_mint_grids=[] 2341 files_MC_integer=[] 2342 location=None 2343 for job in job_group: 2344 files_mint_grids.append(pjoin(job['dirname'],'mint_grids')) 2345 files_MC_integer.append(pjoin(job['dirname'],'grid.MC_integer')) 2346 if not location: 2347 location=pjoin(job['dirname'].rsplit('_',1)[0]) 2348 else: 2349 if location != pjoin(job['dirname'].rsplit('_',1)[0]) : 2350 raise aMCatNLOError('Not all jobs have the same location. '\ 2351 +'Cannot combine them.') 2352 # Needed to average the grids (both xgrids, ave_virt and 2353 # MC_integer grids), but sum the cross section info. The 2354 # latter is only the only line that contains integers. 2355 for j,fs in enumerate([files_mint_grids,files_MC_integer]): 2356 linesoffiles=[] 2357 for f in fs: 2358 with open(f,'r+') as fi: 2359 linesoffiles.append(fi.readlines()) 2360 to_write=[] 2361 for rowgrp in zip(*linesoffiles): 2362 try: 2363 # check that last element on the line is an 2364 # integer (will raise ValueError if not the 2365 # case). If integer, this is the line that 2366 # contains information that needs to be 2367 # summed. All other lines can be averaged. 2368 is_integer = [[int(row.strip().split()[-1])] for row in rowgrp] 2369 floatsbyfile = [[float(a) for a in row.strip().split()] for row in rowgrp] 2370 floatgrps = zip(*floatsbyfile) 2371 special=[] 2372 for i,floatgrp in enumerate(floatgrps): 2373 if i==0: # sum X-sec 2374 special.append(sum(floatgrp)) 2375 elif i==1: # sum unc in quadrature 2376 special.append(math.sqrt(sum([err**2 for err in floatgrp]))) 2377 elif i==2: # average number of PS per iteration 2378 special.append(int(sum(floatgrp)/len(floatgrp))) 2379 elif i==3: # sum the number of iterations 2380 special.append(int(sum(floatgrp))) 2381 elif i==4: # average the nhits_in_grids 2382 special.append(int(sum(floatgrp)/len(floatgrp))) 2383 else: 2384 raise aMCatNLOError('"mint_grids" files not in correct format. '+\ 2385 'Cannot combine them.') 2386 to_write.append(" ".join(str(s) for s in special) + "\n") 2387 except ValueError: 2388 # just average all 2389 floatsbyfile = [[float(a) for a in row.strip().split()] for row in rowgrp] 2390 floatgrps = zip(*floatsbyfile) 2391 averages = [sum(floatgrp)/len(floatgrp) for floatgrp in floatgrps] 2392 to_write.append(" ".join(str(a) for a in averages) + "\n") 2393 # write the data over the master location 2394 if j==0: 2395 with open(pjoin(location,'mint_grids'),'w') as f: 2396 f.writelines(to_write) 2397 elif j==1: 2398 with open(pjoin(location,'grid.MC_integer'),'w') as f: 2399 f.writelines(to_write)
2400 2401
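        # ------------------------------------------------------------------
        # A standalone sketch of the row-wise combination rule above: rows
        # from parallel files are averaged column-by-column, except the one
        # row ending in an integer (the cross-section line), whose columns
        # get the special sum/quadrature/average treatment. The two toy
        # files below each have one float row and one 5-column integer row.
        #
        #     import math
        #     rows_a = ['0.5 0.6 0.7\n', '12.0 0.30 1000 4 2000\n']
        #     rows_b = ['0.7 0.8 0.9\n', '10.0 0.40 1200 4 1800\n']
        #     combined = []
        #     for rowgrp in zip(rows_a, rows_b):
        #         cols = list(zip(*[[float(x) for x in row.split()] for row in rowgrp]))
        #         try:
        #             [int(row.split()[-1]) for row in rowgrp]        # integer row?
        #             xsec = sum(cols[0])                             # sum X-sec
        #             err = math.sqrt(sum(e ** 2 for e in cols[1]))   # quadrature
        #             npts = int(sum(cols[2]) / len(cols[2]))         # average points
        #             nit = int(sum(cols[3]))                         # sum iterations
        #             nhits = int(sum(cols[4]) / len(cols[4]))        # average hits
        #             combined.append('%s %s %s %s %s\n' % (xsec, err, npts, nit, nhits))
        #         except ValueError:
        #             combined.append(' '.join(str(sum(c) / len(c)) for c in cols) + '\n')
        #     print(combined)   # ['0.6 0.7 0.8\n', '22.0 0.5 1100 8 1900\n']
        # ------------------------------------------------------------------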
2402 - def split_jobs_fixed_order(self,jobs_to_run,jobs_to_collect):
2403 """Looks in the jobs_to_run to see if there is the need to split the 2404 jobs, depending on the expected time they take. Updates 2405 jobs_to_run and jobs_to_collect to replace the split-job by 2406 its splits. 2407 """ 2408 # determine the number jobs we should have (this is per p_dir) 2409 if self.options['run_mode'] ==2: 2410 nb_submit = int(self.options['nb_core']) 2411 elif self.options['run_mode'] ==1: 2412 nb_submit = int(self.options['cluster_size']) 2413 else: 2414 nb_submit =1 2415 # total expected aggregated running time 2416 time_expected=0 2417 for job in jobs_to_run: 2418 time_expected+=job['time_spend']*(job['niters']*job['npoints'])/ \ 2419 (job['niters_done']*job['npoints_done']) 2420 # this means that we must expect the following per job (in 2421 # ideal conditions) 2422 time_per_job=time_expected/(nb_submit*(1+len(jobs_to_run)/2)) 2423 jobs_to_run_new=[] 2424 jobs_to_collect_new=[job for job in jobs_to_collect if job['resultABS']!=0] 2425 for job in jobs_to_run: 2426 # remove current job from jobs_to_collect. Make sure 2427 # to remove all the split ones in case the original 2428 # job had been a split one (before it was re-combined) 2429 for j in filter(lambda j: j['p_dir'] == job['p_dir'] and \ 2430 j['channel'] == job['channel'], jobs_to_collect_new): 2431 jobs_to_collect_new.remove(j) 2432 time_expected=job['time_spend']*(job['niters']*job['npoints'])/ \ 2433 (job['niters_done']*job['npoints_done']) 2434 # if the time expected for this job is (much) larger than 2435 # the time spend in the previous iteration, and larger 2436 # than the expected time per job, split it 2437 if time_expected > max(2*job['time_spend']/job['combined'],time_per_job): 2438 # determine the number of splits needed 2439 nsplit=min(max(int(time_expected/max(2*job['time_spend']/job['combined'],time_per_job)),2),nb_submit) 2440 for i in range(1,nsplit+1): 2441 job_new=copy.copy(job) 2442 job_new['split']=i 2443 job_new['wgt_mult']=1./float(nsplit) 2444 job_new['dirname']=job['dirname']+'_%i' % job_new['split'] 2445 job_new['accuracy']=min(job['accuracy']*math.sqrt(float(nsplit)),0.1) 2446 if nsplit >= job['niters']: 2447 job_new['npoints']=int(job['npoints']*job['niters']/nsplit) 2448 job_new['niters']=1 2449 else: 2450 job_new['npoints']=int(job['npoints']/nsplit) 2451 jobs_to_collect_new.append(job_new) 2452 jobs_to_run_new.append(job_new) 2453 else: 2454 jobs_to_collect_new.append(job) 2455 jobs_to_run_new.append(job) 2456 return jobs_to_run_new,jobs_to_collect_new
2457 2458
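        # ------------------------------------------------------------------
        # A numeric sketch of the split decision above: a job is split when
        # its extrapolated running time exceeds both twice its previous time
        # (per combined sub-job) and the section-wide time budget per job.
        # All numbers below are made-up example values.
        #
        #     time_spend, combined = 100., 1        # seconds used in the last step
        #     niters, npoints = 12, 2000            # requested for the next step
        #     niters_done, npoints_done = 6, 500    # done in the last step
        #     nb_submit = 8                         # available cores/slots
        #     time_per_job = 120.                   # section-wide reference time
        #     time_expected = time_spend * (niters * npoints) / (niters_done * npoints_done)
        #     threshold = max(2 * time_spend / combined, time_per_job)
        #     if time_expected > threshold:
        #         nsplit = min(max(int(time_expected / threshold), 2), nb_submit)
        #     else:
        #         nsplit = 1
        #     print(nsplit)   # 800.0 expected vs a 200.0 threshold -> 4 splits
        # ------------------------------------------------------------------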
2459 - def check_the_need_to_split(self,jobs_to_run,jobs_to_collect):
2460 """Looks in the jobs_to_run to see if there is the need to split the 2461 event generation step. Updates jobs_to_run and 2462 jobs_to_collect to replace the split-job by its 2463 splits. Also removes jobs that do not need any events. 2464 """ 2465 nevt_job=self.run_card['nevt_job'] 2466 if nevt_job > 0: 2467 jobs_to_collect_new=copy.copy(jobs_to_collect) 2468 for job in jobs_to_run: 2469 nevents=job['nevents'] 2470 if nevents == 0: 2471 jobs_to_collect_new.remove(job) 2472 elif nevents > nevt_job: 2473 jobs_to_collect_new.remove(job) 2474 if nevents % nevt_job != 0 : 2475 nsplit=int(nevents/nevt_job)+1 2476 else: 2477 nsplit=int(nevents/nevt_job) 2478 for i in range(1,nsplit+1): 2479 job_new=copy.copy(job) 2480 left_over=nevents % nsplit 2481 if i <= left_over: 2482 job_new['nevents']=int(nevents/nsplit)+1 2483 job_new['wgt_frac']=float(job_new['nevents'])/float(nevents) 2484 else: 2485 job_new['nevents']=int(nevents/nsplit) 2486 job_new['wgt_frac']=float(job_new['nevents'])/float(nevents) 2487 job_new['split']=i 2488 job_new['dirname']=job['dirname']+'_%i' % job_new['split'] 2489 jobs_to_collect_new.append(job_new) 2490 jobs_to_run_new=copy.copy(jobs_to_collect_new) 2491 else: 2492 jobs_to_run_new=copy.copy(jobs_to_collect) 2493 for job in jobs_to_collect: 2494 if job['nevents'] == 0: 2495 jobs_to_run_new.remove(job) 2496 jobs_to_collect_new=copy.copy(jobs_to_run_new) 2497 2498 return jobs_to_run_new,jobs_to_collect_new
2499 2500
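        # ------------------------------------------------------------------
        # A standalone sketch of the event-splitting arithmetic above: a job
        # with more than nevt_job events is split into near-equal pieces,
        # with the remainder going to the first splits, and each split gets
        # a wgt_frac equal to its share of the events. The values 10 and 3
        # are made up.
        #
        #     nevents, nevt_job = 10, 3
        #     nsplit = nevents // nevt_job + (1 if nevents % nevt_job else 0)  # -> 4
        #     left_over = nevents % nsplit                                     # -> 2
        #     pieces = [nevents // nsplit + (1 if i <= left_over else 0)
        #               for i in range(1, nsplit + 1)]
        #     print(pieces)                                # [3, 3, 2, 2] -> sums to 10
        #     print([float(n) / nevents for n in pieces])  # the wgt_frac of each split
        # ------------------------------------------------------------------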
2501 - def update_jobs_to_run(self,req_acc,step,jobs,fixed_order=True):
2502 """ 2503 For (N)LO+PS: determines the number of events and/or the required 2504 accuracy per job. 2505 For fixed order: determines which jobs need higher precision and 2506 returns those with the newly requested precision. 2507 """ 2508 err=self.cross_sect_dict['errt'] 2509 tot=self.cross_sect_dict['xsect'] 2510 errABS=self.cross_sect_dict['erra'] 2511 totABS=self.cross_sect_dict['xseca'] 2512 jobs_new=[] 2513 if fixed_order: 2514 if req_acc == -1: 2515 if step+1 == 1: 2516 npoints = self.run_card['npoints_FO'] 2517 niters = self.run_card['niters_FO'] 2518 for job in jobs: 2519 job['mint_mode']=-1 2520 job['niters']=niters 2521 job['npoints']=npoints 2522 jobs_new.append(job) 2523 elif step+1 == 2: 2524 pass 2525 elif step+1 > 2: 2526 raise aMCatNLOError('Cannot determine number of iterations and PS points '+ 2527 'for integration step %i' % step ) 2528 elif ( req_acc > 0 and err/abs(tot) > req_acc*1.2 ) or step <= 0: 2529 req_accABS=req_acc*abs(tot)/totABS # overal relative required accuracy on ABS Xsec. 2530 for job in jobs: 2531 # skip jobs with 0 xsec 2532 if job['resultABS'] == 0.: 2533 continue 2534 job['mint_mode']=-1 2535 # Determine relative required accuracy on the ABS for this job 2536 job['accuracy']=req_accABS*math.sqrt(totABS/job['resultABS']) 2537 # If already accurate enough, skip the job (except when doing the first 2538 # step for the iappl=2 run: we need to fill all the applgrid grids!) 2539 if (job['accuracy'] > job['errorABS']/job['resultABS'] and step != 0) \ 2540 and not (step==-1 and self.run_card['iappl'] == 2): 2541 continue 2542 # Update the number of PS points based on errorABS, ncall and accuracy 2543 itmax_fl=job['niters_done']*math.pow(job['errorABS']/ 2544 (job['accuracy']*job['resultABS']),2) 2545 itmax_fl=itmax_fl*1.1 # add 10% to make sure to have enough 2546 if itmax_fl <= 4.0 : 2547 job['niters']=max(int(round(itmax_fl)),2) 2548 job['npoints']=job['npoints_done']*2 2549 elif itmax_fl > 4.0 and itmax_fl <= 16.0 : 2550 job['niters']=4 2551 job['npoints']=int(round(job['npoints_done']*itmax_fl/4.0))*2 2552 else: 2553 if itmax_fl > 100.0 : itmax_fl=50.0 2554 job['niters']=int(round(math.sqrt(itmax_fl))) 2555 job['npoints']=int(round(job['npoints_done']*itmax_fl/ 2556 round(math.sqrt(itmax_fl))))*2 2557 # Add the job to the list of jobs that need to be run 2558 jobs_new.append(job) 2559 return jobs_new 2560 elif step+1 <= 2: 2561 nevents=self.run_card['nevents'] 2562 # Total required accuracy for the upper bounding envelope 2563 if req_acc<0: 2564 req_acc2_inv=nevents 2565 else: 2566 req_acc2_inv=1/(req_acc*req_acc) 2567 if step+1 == 1 or step+1 == 2 : 2568 # determine the req. accuracy for each of the jobs for Mint-step = 1 2569 for job in jobs: 2570 accuracy=min(math.sqrt(totABS/(req_acc2_inv*job['resultABS'])),0.2) 2571 job['accuracy']=accuracy 2572 if step+1 == 2: 2573 # Randomly (based on the relative ABS Xsec of the job) determine the 2574 # number of events each job needs to generate for MINT-step = 2. 2575 r=self.get_randinit_seed() 2576 random.seed(r) 2577 totevts=nevents 2578 for job in jobs: 2579 job['nevents'] = 0 2580 while totevts : 2581 target = random.random() * totABS 2582 crosssum = 0. 2583 i = 0 2584 while i<len(jobs) and crosssum < target: 2585 job = jobs[i] 2586 crosssum += job['resultABS'] 2587 i += 1 2588 totevts -= 1 2589 i -= 1 2590 jobs[i]['nevents'] += 1 2591 for job in jobs: 2592 job['mint_mode']=step+1 # next step 2593 return jobs 2594 else: 2595 return []
2596 2597
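        # ------------------------------------------------------------------
        # A standalone sketch of the MINT step-2 event assignment above:
        # each of the requested events is assigned to a channel with
        # probability proportional to its |cross section|, by drawing a
        # uniform target in [0, totABS) and walking the cumulative sum.
        # The three resultABS values and the seed are made up.
        #
        #     import random
        #     random.seed(33)
        #     jobs = [{'resultABS': 6.0, 'nevents': 0},
        #             {'resultABS': 3.0, 'nevents': 0},
        #             {'resultABS': 1.0, 'nevents': 0}]
        #     totABS = sum(j['resultABS'] for j in jobs)
        #     totevts = 1000
        #     while totevts:
        #         target = random.random() * totABS
        #         crosssum, i = 0., 0
        #         while i < len(jobs) and crosssum < target:
        #             crosssum += jobs[i]['resultABS']
        #             i += 1
        #         totevts -= 1
        #         i -= 1                       # mirrors the loop above
        #         jobs[i]['nevents'] += 1
        #     print([j['nevents'] for j in jobs])   # roughly 6:3:1, e.g. ~600/300/100
        # ------------------------------------------------------------------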
2598 - def get_randinit_seed(self):
2599 """ Get the random number seed from the randinit file """ 2600 with open(pjoin(self.me_dir,"SubProcesses","randinit")) as randinit: 2601 # format of the file is "r=%d". 2602 iseed = int(randinit.read()[2:]) 2603 return iseed
2604 2605
2606 - def append_the_results(self,jobs,integration_step):
2607 """Appends the results for each of the jobs in the job list""" 2608 error_found=False 2609 for job in jobs: 2610 try: 2611 if integration_step >= 0 : 2612 with open(pjoin(job['dirname'],'res_%s.dat' % integration_step)) as res_file: 2613 results=res_file.readline().split() 2614 else: 2615 # should only be here when doing fixed order with the 'only_generation' 2616 # option equal to True. Take the results from the final run done. 2617 with open(pjoin(job['dirname'],'res.dat')) as res_file: 2618 results=res_file.readline().split() 2619 except IOError: 2620 if not error_found: 2621 error_found=True 2622 error_log=[] 2623 error_log.append(pjoin(job['dirname'],'log.txt')) 2624 continue 2625 job['resultABS']=float(results[0]) 2626 job['errorABS']=float(results[1]) 2627 job['result']=float(results[2]) 2628 job['error']=float(results[3]) 2629 job['niters_done']=int(results[4]) 2630 job['npoints_done']=int(results[5]) 2631 job['time_spend']=float(results[6]) 2632 if job['resultABS'] != 0: 2633 job['err_percABS'] = job['errorABS']/job['resultABS']*100. 2634 job['err_perc'] = job['error']/job['result']*100. 2635 else: 2636 job['err_percABS'] = 0. 2637 job['err_perc'] = 0. 2638 if error_found: 2639 raise aMCatNLOError('An error occurred during the collection of results.\n' + 2640 'Please check the .log files inside the directories which failed:\n' + 2641 '\n'.join(error_log)+'\n')
2642 2643 2644
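        # ------------------------------------------------------------------
        # A sketch of the res.dat / res_*.dat record parsed by
        # append_the_results above: one line with seven whitespace-separated
        # fields (ABS result, ABS error, result, error, iterations done,
        # points done, time spent). The numbers below are made up.
        #
        #     line = '1.2345e+00 6.7e-03 1.2000e+00 7.1e-03 6 5000 321.5'
        #     results = line.split()
        #     job = {'resultABS': float(results[0]), 'errorABS': float(results[1]),
        #            'result':    float(results[2]), 'error':    float(results[3]),
        #            'niters_done': int(results[4]), 'npoints_done': int(results[5]),
        #            'time_spend':  float(results[6])}
        #     job['err_percABS'] = job['errorABS'] / job['resultABS'] * 100.
        #     print(job['err_percABS'])   # ~0.54 (percent)
        # ------------------------------------------------------------------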
2645 - def write_res_txt_file(self,jobs,integration_step):
2646 """writes the res.txt files in the SubProcess dir""" 2647 jobs.sort(key = lambda job: -job['errorABS']) 2648 content=[] 2649 content.append('\n\nCross section per integration channel:') 2650 for job in jobs: 2651 content.append('%(p_dir)20s %(channel)15s %(result)10.8e %(error)6.4e %(err_perc)6.4f%% ' % job) 2652 content.append('\n\nABS cross section per integration channel:') 2653 for job in jobs: 2654 content.append('%(p_dir)20s %(channel)15s %(resultABS)10.8e %(errorABS)6.4e %(err_percABS)6.4f%% ' % job) 2655 # print also statistics for each directory 2656 dir_dict={} 2657 for job in jobs: 2658 try: 2659 dir_dict[job['p_dir']]['result'] += job['result']*job['wgt_frac'] 2660 dir_dict[job['p_dir']]['resultABS'] += job['resultABS']*job['wgt_frac'] 2661 # store the error ^2 2662 dir_dict[job['p_dir']]['error'] += math.pow(job['error'], 2)*job['wgt_frac'] 2663 dir_dict[job['p_dir']]['errorABS'] += math.pow(job['errorABS'], 2)*job['wgt_frac'] 2664 except KeyError: 2665 dir_dict[job['p_dir']] = { 2666 'result' : job['result']*job['wgt_frac'], 2667 'resultABS' : job['resultABS']*job['wgt_frac'], 2668 'error' : math.pow(job['error'], 2)*job['wgt_frac'], 2669 'errorABS' : math.pow(job['errorABS'], 2)*job['wgt_frac']} 2670 2671 for dir_res in dir_dict.values(): 2672 dir_res['error'] = math.sqrt(dir_res['error']) 2673 dir_res['errorABS'] = math.sqrt(dir_res['errorABS']) 2674 content.append('\n\nABS cross section per dir') 2675 for ddir, res in dir_dict.items(): 2676 content.append(('%20s' % ddir) + ' %(resultABS)10.8e %(errorABS)6.4e ' % res) 2677 content.append('\n\nCross section per dir') 2678 for ddir, res in dir_dict.items(): 2679 content.append(('%20s' % ddir) + ' %(result)10.8e %(error)6.4e ' % res) 2680 2681 totABS=0 2682 errABS=0 2683 tot=0 2684 err=0 2685 for job in jobs: 2686 totABS+= job['resultABS']*job['wgt_frac'] 2687 errABS+= math.pow(job['errorABS'],2)*job['wgt_frac'] 2688 tot+= job['result']*job['wgt_frac'] 2689 err+= math.pow(job['error'],2)*job['wgt_frac'] 2690 if jobs: 2691 content.append('\nTotal ABS and \nTotal: \n %10.8e +- %6.4e (%6.4e%%)\n %10.8e +- %6.4e (%6.4e%%) \n' %\ 2692 (totABS, math.sqrt(errABS), math.sqrt(errABS)/totABS *100.,\ 2693 tot, math.sqrt(err), math.sqrt(err)/tot *100.)) 2694 with open(pjoin(self.me_dir,'SubProcesses','res_%s.txt' % integration_step),'w') as res_file: 2695 res_file.write('\n'.join(content)) 2696 randinit=self.get_randinit_seed() 2697 return {'xsect':tot,'xseca':totABS,'errt':math.sqrt(err),\ 2698 'erra':math.sqrt(errABS),'randinit':randinit}
2699 2700
2701 - def collect_scale_pdf_info(self,options,jobs):
2702 """read the scale_pdf_dependence.dat files and collects there results""" 2703 scale_pdf_info=[] 2704 if any(self.run_card['reweight_scale']) or any(self.run_card['reweight_PDF']) or \ 2705 len(self.run_card['dynamical_scale_choice']) > 1 or len(self.run_card['lhaid']) > 1: 2706 evt_files=[] 2707 evt_wghts=[] 2708 for job in jobs: 2709 evt_files.append(pjoin(job['dirname'],'scale_pdf_dependence.dat')) 2710 evt_wghts.append(job['wgt_frac']) 2711 scale_pdf_info = self.pdf_scale_from_reweighting(evt_files,evt_wghts) 2712 return scale_pdf_info
2713 2714
2715 - def combine_plots_FO(self,folder_name,jobs):
2716 """combines the plots and puts then in the Events/run* directory""" 2717 devnull = open(os.devnull, 'w') 2718 2719 if self.analyse_card['fo_analysis_format'].lower() == 'topdrawer': 2720 topfiles = [] 2721 for job in jobs: 2722 if job['dirname'].endswith('.top'): 2723 topfiles.append(job['dirname']) 2724 else: 2725 topfiles.append(pjoin(job['dirname'],'MADatNLO.top')) 2726 misc.call(['./combine_plots_FO.sh'] + topfiles, \ 2727 stdout=devnull, 2728 cwd=pjoin(self.me_dir, 'SubProcesses')) 2729 files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.top'), 2730 pjoin(self.me_dir, 'Events', self.run_name)) 2731 logger.info('The results of this run and the TopDrawer file with the plots' + \ 2732 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2733 elif self.analyse_card['fo_analysis_format'].lower() == 'hwu': 2734 out=pjoin(self.me_dir,'Events',self.run_name,'MADatNLO') 2735 self.combine_plots_HwU(jobs,out) 2736 try: 2737 misc.call(['gnuplot','MADatNLO.gnuplot'],\ 2738 stdout=devnull,stderr=devnull,\ 2739 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 2740 except Exception: 2741 pass 2742 logger.info('The results of this run and the HwU and GnuPlot files with the plots' + \ 2743 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2744 elif self.analyse_card['fo_analysis_format'].lower() == 'root': 2745 rootfiles = [] 2746 for job in jobs: 2747 if job['dirname'].endswith('.root'): 2748 rootfiles.append(job['dirname']) 2749 else: 2750 rootfiles.append(pjoin(job['dirname'],'MADatNLO.root')) 2751 misc.call(['./combine_root.sh'] + folder_name + rootfiles, \ 2752 stdout=devnull, 2753 cwd=pjoin(self.me_dir, 'SubProcesses')) 2754 files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.root'), 2755 pjoin(self.me_dir, 'Events', self.run_name)) 2756 logger.info('The results of this run and the ROOT file with the plots' + \ 2757 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2758 elif self.analyse_card['fo_analysis_format'].lower() == 'lhe': 2759 self.combine_FO_lhe(jobs) 2760 logger.info('The results of this run and the LHE File (to be used for plotting only)' + \ 2761 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2762 else: 2763 logger.info('The results of this run' + \ 2764 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name))
2765
2766 - def combine_FO_lhe(self,jobs):
2767 """combine the various lhe file generated in each directory. 2768 They are two steps: 2769 1) banner 2770 2) reweight each sample by the factor written at the end of each file 2771 3) concatenate each of the new files (gzip those). 2772 """ 2773 2774 logger.info('Combining lhe events for plotting analysis') 2775 start = time.time() 2776 self.run_card['fo_lhe_postprocessing'] = [i.lower() for i in self.run_card['fo_lhe_postprocessing']] 2777 output = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz') 2778 if os.path.exists(output): 2779 os.remove(output) 2780 2781 2782 2783 2784 # 1. write the banner 2785 text = open(pjoin(jobs[0]['dirname'],'header.txt'),'r').read() 2786 i1, i2 = text.find('<initrwgt>'),text.find('</initrwgt>') 2787 self.banner['initrwgt'] = text[10+i1:i2] 2788 # 2789 # <init> 2790 # 2212 2212 6.500000e+03 6.500000e+03 0 0 247000 247000 -4 1 2791 # 8.430000e+02 2.132160e+00 8.430000e+02 1 2792 # <generator name='MadGraph5_aMC@NLO' version='2.5.2'>please cite 1405.0301 </generator> 2793 # </init> 2794 2795 cross = sum(j['result'] for j in jobs) 2796 error = math.sqrt(sum(j['error'] for j in jobs)) 2797 self.banner['init'] = "0 0 0e0 0e0 0 0 0 0 -4 1\n %s %s %s 1" % (cross, error, cross) 2798 self.banner.write(output[:-3], close_tag=False) 2799 misc.gzip(output[:-3]) 2800 2801 2802 2803 fsock = lhe_parser.EventFile(output,'a') 2804 if 'nogrouping' in self.run_card['fo_lhe_postprocessing']: 2805 fsock.eventgroup = False 2806 else: 2807 fsock.eventgroup = True 2808 2809 if 'norandom' in self.run_card['fo_lhe_postprocessing']: 2810 for job in jobs: 2811 dirname = job['dirname'] 2812 #read last line 2813 lastline = misc.BackRead(pjoin(dirname,'events.lhe')).readline() 2814 nb_event, sumwgt, cross = [float(i) for i in lastline.split()] 2815 # get normalisation ratio 2816 ratio = cross/sumwgt 2817 lhe = lhe_parser.EventFile(pjoin(dirname,'events.lhe')) 2818 lhe.eventgroup = True # read the events by eventgroup 2819 for eventsgroup in lhe: 2820 neweventsgroup = [] 2821 for i,event in enumerate(eventsgroup): 2822 event.rescale_weights(ratio) 2823 if i>0 and 'noidentification' not in self.run_card['fo_lhe_postprocessing'] \ 2824 and event == neweventsgroup[-1]: 2825 neweventsgroup[-1].wgt += event.wgt 2826 for key in event.reweight_data: 2827 neweventsgroup[-1].reweight_data[key] += event.reweight_data[key] 2828 else: 2829 neweventsgroup.append(event) 2830 fsock.write_events(neweventsgroup) 2831 lhe.close() 2832 os.remove(pjoin(dirname,'events.lhe')) 2833 else: 2834 lhe = [] 2835 lenlhe = [] 2836 misc.sprint('Need to combine %s event files' % len(jobs)) 2837 globallhe = lhe_parser.MultiEventFile() 2838 globallhe.eventgroup = True 2839 for job in jobs: 2840 dirname = job['dirname'] 2841 lastline = misc.BackRead(pjoin(dirname,'events.lhe')).readline() 2842 nb_event, sumwgt, cross = [float(i) for i in lastline.split()] 2843 lastlhe = globallhe.add(pjoin(dirname,'events.lhe'),cross, 0, cross, 2844 nb_event=int(nb_event), scale=cross/sumwgt) 2845 for eventsgroup in globallhe: 2846 neweventsgroup = [] 2847 for i,event in enumerate(eventsgroup): 2848 event.rescale_weights(event.sample_scale) 2849 if i>0 and 'noidentification' not in self.run_card['fo_lhe_postprocessing'] \ 2850 and event == neweventsgroup[-1]: 2851 neweventsgroup[-1].wgt += event.wgt 2852 for key in event.reweight_data: 2853 neweventsgroup[-1].reweight_data[key] += event.reweight_data[key] 2854 else: 2855 neweventsgroup.append(event) 2856 fsock.write_events(neweventsgroup) 2857 globallhe.close() 2858 
fsock.write('</LesHouchesEvents>\n') 2859 fsock.close() 2860 misc.sprint('The combining of the LHE files has taken ', time.time()-start) 2861 for job in jobs: 2862 dirname = job['dirname'] 2863 os.remove(pjoin(dirname,'events.lhe')) 2864 2865 2866 2867 misc.sprint('The combining of the LHE files has taken ', time.time()-start)
2868 2869 2870 2871 2872 2873
2874 - def combine_plots_HwU(self,jobs,out,normalisation=None):
2875 """Sums all the plots in the HwU format.""" 2876 logger.debug('Combining HwU plots.') 2877 2878 command = [] 2879 command.append(pjoin(self.me_dir, 'bin', 'internal','histograms.py')) 2880 for job in jobs: 2881 if job['dirname'].endswith('.HwU'): 2882 command.append(job['dirname']) 2883 else: 2884 command.append(pjoin(job['dirname'],'MADatNLO.HwU')) 2885 command.append("--out="+out) 2886 command.append("--gnuplot") 2887 command.append("--band=[]") 2888 command.append("--lhapdf-config="+self.options['lhapdf']) 2889 if normalisation: 2890 command.append("--multiply="+(','.join([str(n) for n in normalisation]))) 2891 command.append("--sum") 2892 command.append("--keep_all_weights") 2893 command.append("--no_open") 2894 2895 p = misc.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, cwd=self.me_dir) 2896 2897 while p.poll() is None: 2898 line = p.stdout.readline() 2899 if any(t in line for t in ['INFO:','WARNING:','CRITICAL:','ERROR:','KEEP:']): 2900 print line[:-1] 2901 elif __debug__ and line: 2902 logger.debug(line[:-1])
2903 2904
2905 - def applgrid_combine(self,cross,error,jobs):
2906 """Combines the APPLgrids in all the SubProcess/P*/all_G*/ directories""" 2907 logger.debug('Combining APPLgrids \n') 2908 applcomb=pjoin(self.options['applgrid'].rstrip('applgrid-config'), 2909 'applgrid-combine') 2910 all_jobs=[] 2911 for job in jobs: 2912 all_jobs.append(job['dirname']) 2913 ngrids=len(all_jobs) 2914 nobs =len([name for name in os.listdir(all_jobs[0]) if name.endswith("_out.root")]) 2915 for obs in range(0,nobs): 2916 gdir = [pjoin(job,"grid_obs_"+str(obs)+"_out.root") for job in all_jobs] 2917 # combine APPLgrids from different channels for observable 'obs' 2918 if self.run_card["iappl"] == 1: 2919 misc.call([applcomb,'-o', pjoin(self.me_dir,"Events",self.run_name, 2920 "aMCfast_obs_"+str(obs)+"_starting_grid.root"), '--optimise']+ gdir) 2921 elif self.run_card["iappl"] == 2: 2922 unc2_inv=pow(cross/error,2) 2923 unc2_inv_ngrids=pow(cross/error,2)*ngrids 2924 misc.call([applcomb,'-o', pjoin(self.me_dir,"Events", 2925 self.run_name,"aMCfast_obs_"+str(obs)+".root"),'-s', 2926 str(unc2_inv),'--weight',str(unc2_inv)]+ gdir) 2927 for job in all_jobs: 2928 os.remove(pjoin(job,"grid_obs_"+str(obs)+"_in.root")) 2929 else: 2930 raise aMCatNLOError('iappl parameter can only be 0, 1 or 2') 2931 # after combining, delete the original grids 2932 for ggdir in gdir: 2933 os.remove(ggdir)
2934 2935
2936 - def applgrid_distribute(self,options,mode,p_dirs):
2937 """Distributes the APPLgrids ready to be filled by a second run of the code""" 2938 # if no appl_start_grid argument given, guess it from the time stamps 2939 # of the starting grid files 2940 if not('appl_start_grid' in options.keys() and options['appl_start_grid']): 2941 gfiles = misc.glob(pjoin('*', 'aMCfast_obs_0_starting_grid.root'), 2942 pjoin(self.me_dir,'Events')) 2943 2944 time_stamps={} 2945 for root_file in gfiles: 2946 time_stamps[root_file]=os.path.getmtime(root_file) 2947 options['appl_start_grid']= \ 2948 max(time_stamps.iterkeys(), key=(lambda key: 2949 time_stamps[key])).split('/')[-2] 2950 logger.info('No --appl_start_grid option given. '+\ 2951 'Guessing that starting_grid from run "%s" should be used.' \ 2952 % options['appl_start_grid']) 2953 2954 if 'appl_start_grid' in options.keys() and options['appl_start_grid']: 2955 self.appl_start_grid = options['appl_start_grid'] 2956 start_grid_dir=pjoin(self.me_dir, 'Events', self.appl_start_grid) 2957 # check that this dir exists and at least one grid file is there 2958 if not os.path.exists(pjoin(start_grid_dir, 2959 'aMCfast_obs_0_starting_grid.root')): 2960 raise self.InvalidCmd('APPLgrid file not found: %s' % \ 2961 pjoin(start_grid_dir,'aMCfast_obs_0_starting_grid.root')) 2962 else: 2963 all_grids=[pjoin(start_grid_dir,name) for name in os.listdir( \ 2964 start_grid_dir) if name.endswith("_starting_grid.root")] 2965 nobs =len(all_grids) 2966 gstring=" ".join(all_grids) 2967 if not hasattr(self, 'appl_start_grid') or not self.appl_start_grid: 2968 raise self.InvalidCmd('No APPLgrid name currently defined.'+ 2969 'Please provide this information.') 2970 #copy the grid to all relevant directories 2971 for pdir in p_dirs: 2972 g_dirs = [file for file in os.listdir(pjoin(self.me_dir, 2973 "SubProcesses",pdir)) if file.startswith(mode+'_G') and 2974 os.path.isdir(pjoin(self.me_dir,"SubProcesses",pdir, file))] 2975 for g_dir in g_dirs: 2976 for grid in all_grids: 2977 obs=grid.split('_')[-3] 2978 files.cp(grid,pjoin(self.me_dir,"SubProcesses",pdir,g_dir, 2979 'grid_obs_'+obs+'_in.root'))
2980 2981 2982 2983
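        # ------------------------------------------------------------------
        # A standalone sketch of the "guess the starting grid from time
        # stamps" logic above, using the standard library instead of
        # misc.glob, and max(key=...) instead of the explicit dictionary.
        # The relative 'Events' path is a stand-in for self.me_dir/Events.
        #
        #     import os, glob
        #     gfiles = glob.glob(os.path.join('Events', '*',
        #                                     'aMCfast_obs_0_starting_grid.root'))
        #     if gfiles:
        #         newest = max(gfiles, key=os.path.getmtime)   # most recent grid
        #         run_name = newest.split(os.sep)[-2]          # parent dir = run name
        #         print(run_name)
        # ------------------------------------------------------------------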
2984 - def collect_log_files(self, jobs, integration_step):
2985 """collect the log files and put them in a single, html-friendly file 2986 inside the Events/run_.../ directory""" 2987 log_file = pjoin(self.me_dir, 'Events', self.run_name, 2988 'alllogs_%d.html' % integration_step) 2989 outfile = open(log_file, 'w') 2990 2991 content = '' 2992 content += '<HTML><BODY>\n<font face="courier" size=2>' 2993 for job in jobs: 2994 # put an anchor 2995 log=pjoin(job['dirname'],'log_MINT%s.txt' % integration_step) 2996 content += '<a name=%s></a>\n' % (os.path.dirname(log).replace( 2997 pjoin(self.me_dir,'SubProcesses'),'')) 2998 # and put some nice header 2999 content += '<font color="red">\n' 3000 content += '<br>LOG file for integration channel %s, %s <br>' % \ 3001 (os.path.dirname(log).replace(pjoin(self.me_dir, 3002 'SubProcesses'), ''), 3003 integration_step) 3004 content += '</font>\n' 3005 #then just flush the content of the small log inside the big log 3006 #the PRE tag prints everything verbatim 3007 with open(log) as l: 3008 content += '<PRE>\n' + l.read() + '\n</PRE>' 3009 content +='<br>\n' 3010 outfile.write(content) 3011 content='' 3012 3013 outfile.write('</font>\n</BODY></HTML>\n') 3014 outfile.close()
3015  
3016  
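# Illustrative sketch, not part of the module: the anchor written for each
# channel log above is its directory with the SubProcesses prefix stripped
# off; the paths below are hypothetical.
me_dir = '/path/to/PROC'
dirname = me_dir + '/SubProcesses/P0_gg_ttx/all_G1'
anchor = dirname.replace(me_dir + '/SubProcesses', '')   # '/P0_gg_ttx/all_G1'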
3017 - def finalise_run_FO(self,folder_name,jobs):
3018 """Combine the plots and put the res*.txt files in the Events/run.../ folder.""" 3019 # Copy the res_*.txt files to the Events/run* folder 3020 res_files = misc.glob('res_*.txt', pjoin(self.me_dir, 'SubProcesses')) 3021 for res_file in res_files: 3022 files.mv(res_file,pjoin(self.me_dir, 'Events', self.run_name)) 3023 # Collect the plots and put them in the Events/run* folder 3024 self.combine_plots_FO(folder_name,jobs) 3025 # If doing the applgrid-stuff, also combine those grids 3026 # and put those in the Events/run* folder 3027 if self.run_card['iappl'] != 0: 3028 cross=self.cross_sect_dict['xsect'] 3029 error=self.cross_sect_dict['errt'] 3030 self.applgrid_combine(cross,error,jobs)
3031  
3032  
3033 - def setup_cluster_or_multicore(self):
3034 """setup the number of cores for multicore, and the cluster-type for cluster runs""" 3035 if self.cluster_mode == 1: 3036 cluster_name = self.options['cluster_type'] 3037 try: 3038 self.cluster = cluster.from_name[cluster_name](**self.options) 3039 except KeyError: 3040 # Check if a plugin define this type of cluster 3041 # check for PLUGIN format 3042 cluster_class = misc.from_plugin_import(self.plugin_path, 3043 'new_cluster', cluster_name, 3044 info = 'cluster handling will be done with PLUGIN: %{plug}s' ) 3045 if cluster_class: 3046 self.cluster = cluster_class(**self.options) 3047 3048 if self.cluster_mode == 2: 3049 try: 3050 import multiprocessing 3051 if not self.nb_core: 3052 try: 3053 self.nb_core = int(self.options['nb_core']) 3054 except TypeError: 3055 self.nb_core = multiprocessing.cpu_count() 3056 logger.info('Using %d cores' % self.nb_core) 3057 except ImportError: 3058 self.nb_core = 1 3059 logger.warning('Impossible to detect the number of cores => Using one.\n'+ 3060 'Use set nb_core X in order to set this number and be able to '+ 3061 'run in multicore.') 3062 3063 self.cluster = cluster.MultiCore(**self.options)
3064  
3065  
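# Illustrative sketch, not part of the module: the fallback chain used above
# when choosing the core count for multicore (cluster_mode == 2) running.
try:
    import multiprocessing
    nb_core = multiprocessing.cpu_count()   # detected from the machine
except ImportError:
    nb_core = 1                             # cannot detect -> single core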
3066 - def clean_previous_results(self,options,p_dirs,folder_name):
3067 """Clean previous results. 3068 o. If doing only the reweighting step, do not delete anything and return directlty. 3069 o. Always remove all the G*_* files (from split event generation). 3070 o. Remove the G* (or born_G* or all_G*) only when NOT doing only_generation or reweight_only.""" 3071 if options['reweightonly']: 3072 return 3073 if not options['only_generation']: 3074 self.update_status('Cleaning previous results', level=None) 3075 for dir in p_dirs: 3076 #find old folders to be removed 3077 for obj in folder_name: 3078 # list all the G* (or all_G* or born_G*) directories 3079 to_rm = [file for file in \ 3080 os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)) \ 3081 if file.startswith(obj[:-1]) and \ 3082 (os.path.isdir(pjoin(self.me_dir, 'SubProcesses', dir, file)) or \ 3083 os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file)))] 3084 # list all the G*_* directories (from split event generation) 3085 to_always_rm = [file for file in \ 3086 os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)) \ 3087 if file.startswith(obj[:-1]) and 3088 '_' in file and not '_G' in file and \ 3089 (os.path.isdir(pjoin(self.me_dir, 'SubProcesses', dir, file)) or \ 3090 os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file)))] 3091 3092 if not options['only_generation']: 3093 to_always_rm.extend(to_rm) 3094 if os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir,'MadLoop5_resources.tar.gz')): 3095 to_always_rm.append(pjoin(self.me_dir, 'SubProcesses', dir,'MadLoop5_resources.tar.gz')) 3096 files.rm([pjoin(self.me_dir, 'SubProcesses', dir, d) for d in to_always_rm]) 3097 return
3098  
3099  
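# Illustrative sketch, not part of the module: which names the two filters
# above select for obj = 'G*', given a hypothetical directory listing.
names = ['G1', 'G23', 'G1_2', 'G7_11']
prefix = 'G*'[:-1]                                     # 'G'
to_rm = [n for n in names if n.startswith(prefix)]     # all four
to_always_rm = [n for n in names if n.startswith(prefix)
                and '_' in n and '_G' not in n]        # ['G1_2', 'G7_11']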
3100 - def print_summary(self, options, step, mode, scale_pdf_info=[], done=True):
3101 """print a summary of the results contained in self.cross_sect_dict. 3102 step corresponds to the mintMC step, if =2 (i.e. after event generation) 3103 some additional infos are printed""" 3104 # find process name 3105 proc_card_lines = open(pjoin(self.me_dir, 'Cards', 'proc_card_mg5.dat')).read().split('\n') 3106 process = '' 3107 for line in proc_card_lines: 3108 if line.startswith('generate') or line.startswith('add process'): 3109 process = process+(line.replace('generate ', '')).replace('add process ','')+' ; ' 3110 lpp = {0:'l', 1:'p', -1:'pbar'} 3111 if self.ninitial == 1: 3112 proc_info = '\n Process %s' % process[:-3] 3113 else: 3114 proc_info = '\n Process %s\n Run at %s-%s collider (%s + %s GeV)' % \ 3115 (process[:-3], lpp[self.run_card['lpp1']], lpp[self.run_card['lpp2']], 3116 self.run_card['ebeam1'], self.run_card['ebeam2']) 3117 3118 if self.ninitial == 1: 3119 self.cross_sect_dict['unit']='GeV' 3120 self.cross_sect_dict['xsec_string']='(Partial) decay width' 3121 self.cross_sect_dict['axsec_string']='(Partial) abs(decay width)' 3122 else: 3123 self.cross_sect_dict['unit']='pb' 3124 self.cross_sect_dict['xsec_string']='Total cross section' 3125 self.cross_sect_dict['axsec_string']='Total abs(cross section)' 3126 if self.run_card['event_norm'].lower()=='bias': 3127 self.cross_sect_dict['xsec_string']+=', incl. bias (DO NOT USE)' 3128 3129 if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']: 3130 status = ['Determining the number of unweighted events per channel', 3131 'Updating the number of unweighted events per channel', 3132 'Summary:'] 3133 computed='(computed from LHE events)' 3134 elif mode in ['NLO', 'LO']: 3135 status = ['Results after grid setup:','Current results:', 3136 'Final results and run summary:'] 3137 computed='(computed from histogram information)' 3138 3139 if step != 2 and mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']: 3140 message = status[step] + '\n\n Intermediate results:' + \ 3141 ('\n Random seed: %(randinit)d' + \ 3142 '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' + \ 3143 '\n %(axsec_string)s: %(xseca)8.3e +- %(erra)6.1e %(unit)s \n') \ 3144 % self.cross_sect_dict 3145 elif mode in ['NLO','LO'] and not done: 3146 if step == 0: 3147 message = '\n ' + status[0] + \ 3148 '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \ 3149 self.cross_sect_dict 3150 else: 3151 message = '\n ' + status[1] + \ 3152 '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \ 3153 self.cross_sect_dict 3154 3155 else: 3156 message = '\n --------------------------------------------------------------' 3157 message = message + \ 3158 '\n ' + status[2] + proc_info 3159 if mode not in ['LO', 'NLO']: 3160 message = message + \ 3161 '\n Number of events generated: %s' % self.run_card['nevents'] 3162 message = message + \ 3163 '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \ 3164 self.cross_sect_dict 3165 message = message + \ 3166 '\n --------------------------------------------------------------' 3167 if scale_pdf_info and (self.run_card['nevents']>=10000 or mode in ['NLO', 'LO']): 3168 if scale_pdf_info[0]: 3169 # scale uncertainties 3170 message = message + '\n Scale variation %s:' % computed 3171 for s in scale_pdf_info[0]: 3172 if s['unc']: 3173 if self.run_card['ickkw'] != -1: 3174 message = message + \ 3175 ('\n Dynamical_scale_choice %(label)i (envelope of %(size)s values): '\ 3176 '\n %(cen)8.3e pb +%(max)0.1f%% -%(min)0.1f%%') % s 3177 else: 3178 message = message + \ 3179 ('\n Soft and hard scale 
dependence (added in quadrature): '\ 3180 '\n %(cen)8.3e pb +%(max_q)0.1f%% -%(min_q)0.1f%%') % s 3181 3182 else: 3183 message = message + \ 3184 ('\n Dynamical_scale_choice %(label)i: '\ 3185 '\n %(cen)8.3e pb') % s 3186 3187 if scale_pdf_info[1]: 3188 message = message + '\n PDF variation %s:' % computed 3189 for p in scale_pdf_info[1]: 3190 if p['unc']=='none': 3191 message = message + \ 3192 ('\n %(name)s (central value only): '\ 3193 '\n %(cen)8.3e pb') % p 3194 3195 elif p['unc']=='unknown': 3196 message = message + \ 3197 ('\n %(name)s (%(size)s members; combination method unknown): '\ 3198 '\n %(cen)8.3e pb') % p 3199 else: 3200 message = message + \ 3201 ('\n %(name)s (%(size)s members; using %(unc)s method): '\ 3202 '\n %(cen)8.3e pb +%(max)0.1f%% -%(min)0.1f%%') % p 3203 # pdf uncertainties 3204 message = message + \ 3205 '\n --------------------------------------------------------------' 3206 3207 3208 if (mode in ['NLO', 'LO'] and not done) or \ 3209 (mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO'] and step!=2): 3210 logger.info(message+'\n') 3211 return 3212 3213 # Some advanced general statistics are shown in the debug message at the 3214 # end of the run 3215 # Make sure it never stops a run 3216 # Gather some basic statistics for the run and extracted from the log files. 3217 if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']: 3218 log_GV_files = misc.glob(pjoin('P*','G*','log_MINT*.txt'), 3219 pjoin(self.me_dir, 'SubProcesses')) 3220 all_log_files = log_GV_files 3221 elif mode == 'NLO': 3222 log_GV_files = misc.glob(pjoin('P*','all_G*','log_MINT*.txt'), 3223 pjoin(self.me_dir, 'SubProcesses')) 3224 all_log_files = log_GV_files 3225 3226 elif mode == 'LO': 3227 log_GV_files = '' 3228 all_log_files = misc.glob(pjoin('P*','born_G*','log_MINT*.txt'), 3229 pjoin(self.me_dir, 'SubProcesses')) 3230 else: 3231 raise aMCatNLOError, 'Run mode %s not supported.'%mode 3232 3233 try: 3234 message, debug_msg = \ 3235 self.compile_advanced_stats(log_GV_files, all_log_files, message) 3236 except Exception as e: 3237 debug_msg = 'Advanced statistics collection failed with error "%s"\n'%str(e) 3238 err_string = StringIO.StringIO() 3239 traceback.print_exc(limit=4, file=err_string) 3240 debug_msg += 'Please report this backtrace to a MG5_aMC developer:\n%s'\ 3241 %err_string.getvalue() 3242 3243 logger.debug(debug_msg+'\n') 3244 logger.info(message+'\n') 3245 3246 # Now copy relevant information in the Events/Run_<xxx> directory 3247 evt_path = pjoin(self.me_dir, 'Events', self.run_name) 3248 open(pjoin(evt_path, 'summary.txt'),'w').write(message+'\n') 3249 open(pjoin(evt_path, '.full_summary.txt'), 3250 'w').write(message+'\n\n'+debug_msg+'\n') 3251 3252 self.archive_files(evt_path,mode)
3253
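# Illustrative sketch, not part of the module: the %-dictionary formatting
# used to build the summary above, with hypothetical numbers.
cross_sect_dict = {'xsect': 35.2, 'errt': 0.4, 'unit': 'pb',
                   'xsec_string': 'Total cross section'}
print '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % cross_sect_dict
# prints: " Total cross section: 3.520e+01 +- 4.0e-01 pb"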
3254 - def archive_files(self, evt_path, mode):
3255 """ Copies in the Events/Run_<xxx> directory relevant files characterizing 3256 the run.""" 3257 3258 files_to_arxiv = [pjoin('Cards','param_card.dat'), 3259 pjoin('Cards','MadLoopParams.dat'), 3260 pjoin('Cards','FKS_params.dat'), 3261 pjoin('Cards','run_card.dat'), 3262 pjoin('Subprocesses','setscales.f'), 3263 pjoin('Subprocesses','cuts.f')] 3264 3265 if mode in ['NLO', 'LO']: 3266 files_to_arxiv.append(pjoin('Cards','FO_analyse_card.dat')) 3267 3268 if not os.path.exists(pjoin(evt_path,'RunMaterial')): 3269 os.mkdir(pjoin(evt_path,'RunMaterial')) 3270 3271 for path in files_to_arxiv: 3272 if os.path.isfile(pjoin(self.me_dir,path)): 3273 files.cp(pjoin(self.me_dir,path),pjoin(evt_path,'RunMaterial')) 3274 misc.call(['tar','-czpf','RunMaterial.tar.gz','RunMaterial'],cwd=evt_path) 3275 shutil.rmtree(pjoin(evt_path,'RunMaterial'))
3276
3277 - def compile_advanced_stats(self,log_GV_files,all_log_files,message):
3278 """ This functions goes through the log files given in arguments and 3279 compiles statistics about MadLoop stability, virtual integration 3280 optimization and detection of potential error messages into a nice 3281 debug message to printed at the end of the run """ 3282 3283 def safe_float(str_float): 3284 try: 3285 return float(str_float) 3286 except ValueError: 3287 logger.debug('Could not convert the following float during'+ 3288 ' advanced statistics printout: %s'%str(str_float)) 3289 return -1.0
3290  
3291  
3292          # > UPS is a dictionary of tuples with this format {channel:[nPS,nUPS]}
3293          # > Errors is a list of tuples with this format (log_file,nErrors)
3294          stats = {'UPS':{}, 'Errors':[], 'virt_stats':{}, 'timings':{}}
3295          mint_search = re.compile(r"MINT(?P<ID>\d*).txt")
3296  
3297          # ==================================
3298          # == MadLoop stability statistics ==
3299          # ==================================
3300  
3301          # Recover the fraction of unstable PS points found in the runs for
3302          # the virtuals
3303          UPS_stat_finder = re.compile(  # 'Satistics' (sic) matches the spelling in the MadLoop log
3304               r"Satistics from MadLoop:.*"+\
3305               r"Total points tried\:\s+(?P<ntot>\d+).*"+\
3306               r"Stability unknown\:\s+(?P<nsun>\d+).*"+\
3307               r"Stable PS point\:\s+(?P<nsps>\d+).*"+\
3308               r"Unstable PS point \(and rescued\)\:\s+(?P<nups>\d+).*"+\
3309               r"Exceptional PS point \(unstable and not rescued\)\:\s+(?P<neps>\d+).*"+\
3310               r"Double precision used\:\s+(?P<nddp>\d+).*"+\
3311               r"Quadruple precision used\:\s+(?P<nqdp>\d+).*"+\
3312               r"Initialization phase\-space points\:\s+(?P<nini>\d+).*"+\
3313               r"Unknown return code \(100\)\:\s+(?P<n100>\d+).*"+\
3314               r"Unknown return code \(10\)\:\s+(?P<n10>\d+).*",re.DOTALL)
3315  
3316          unit_code_meaning = { 0 : 'Not identified (CTModeRun != -1)',
3317                                1 : 'CutTools (double precision)',
3318                                2 : 'PJFry++',
3319                                3 : 'IREGI',
3320                                4 : 'Golem95',
3321                                5 : 'Samurai',
3322                                6 : 'Ninja (double precision)',
3323                                7 : 'COLLIER',
3324                                8 : 'Ninja (quadruple precision)',
3325                                9 : 'CutTools (quadruple precision)'}
3326          RetUnit_finder = re.compile(
3327                             r"#Unit\s*(?P<unit>\d+)\s*=\s*(?P<n_occurences>\d+)")
3328          #Unit
3329  
3330          for gv_log in log_GV_files:
3331              channel_name = '/'.join(gv_log.split('/')[-5:-1])
3332              log=open(gv_log,'r').read()
3333              UPS_stats = re.search(UPS_stat_finder,log)
3334              for retunit_stats in re.finditer(RetUnit_finder, log):
3335                  if channel_name not in stats['UPS'].keys():
3336                      stats['UPS'][channel_name] = [0]*10+[[0]*10]
3337                  stats['UPS'][channel_name][10][int(retunit_stats.group('unit'))] \
3338                      += int(retunit_stats.group('n_occurences'))
3339              if not UPS_stats is None:
3340                  try:
3341                      stats['UPS'][channel_name][0] += int(UPS_stats.group('ntot'))
3342                      stats['UPS'][channel_name][1] += int(UPS_stats.group('nsun'))
3343                      stats['UPS'][channel_name][2] += int(UPS_stats.group('nsps'))
3344                      stats['UPS'][channel_name][3] += int(UPS_stats.group('nups'))
3345                      stats['UPS'][channel_name][4] += int(UPS_stats.group('neps'))
3346                      stats['UPS'][channel_name][5] += int(UPS_stats.group('nddp'))
3347                      stats['UPS'][channel_name][6] += int(UPS_stats.group('nqdp'))
3348                      stats['UPS'][channel_name][7] += int(UPS_stats.group('nini'))
3349                      stats['UPS'][channel_name][8] += int(UPS_stats.group('n100'))
3350                      stats['UPS'][channel_name][9] += int(UPS_stats.group('n10'))
3351                  except KeyError:
3352                      stats['UPS'][channel_name] = [int(UPS_stats.group('ntot')),
3353                          int(UPS_stats.group('nsun')),int(UPS_stats.group('nsps')),
3354                          int(UPS_stats.group('nups')),int(UPS_stats.group('neps')),
3355                          int(UPS_stats.group('nddp')),int(UPS_stats.group('nqdp')),
3356                          int(UPS_stats.group('nini')),int(UPS_stats.group('n100')),
3357                          int(UPS_stats.group('n10')),[0]*10]
3358          debug_msg = ""
3359          if len(stats['UPS'].keys())>0:
3360              nTotPS  = sum([chan[0] for chan in stats['UPS'].values()],0)
3361              nTotsun = sum([chan[1] for chan in stats['UPS'].values()],0)
3362              nTotsps = sum([chan[2] for chan in stats['UPS'].values()],0)
3363              nTotups = sum([chan[3] for chan in stats['UPS'].values()],0)
3364              nToteps = sum([chan[4] for chan in stats['UPS'].values()],0)
3365              nTotddp = sum([chan[5] for chan in stats['UPS'].values()],0)
3366              nTotqdp = sum([chan[6] for chan in stats['UPS'].values()],0)
3367              nTotini = sum([chan[7] for chan in stats['UPS'].values()],0)
3368              nTot100 = sum([chan[8] for chan in stats['UPS'].values()],0)
3369              nTot10  = sum([chan[9] for chan in stats['UPS'].values()],0)
3370              nTot1   = [sum([chan[10][i] for chan in stats['UPS'].values()],0) \
3371                                                         for i in range(10)]
3372              UPSfracs = [(chan[0] , 0.0 if chan[1][0]==0 else \
3373                   safe_float(chan[1][4]*100)/chan[1][0]) for chan in stats['UPS'].items()]
3374              maxUPS = max(UPSfracs, key = lambda w: w[1])
3375  
3376              tmpStr = ""
3377              tmpStr += '\n Number of loop ME evaluations (by MadLoop): %d'%nTotPS
3378              tmpStr += '\n Stability unknown: %d'%nTotsun
3379              tmpStr += '\n Stable PS point: %d'%nTotsps
3380              tmpStr += '\n Unstable PS point (and rescued): %d'%nTotups
3381              tmpStr += '\n Unstable PS point (and not rescued): %d'%nToteps
3382              tmpStr += '\n Only double precision used: %d'%nTotddp
3383              tmpStr += '\n Quadruple precision used: %d'%nTotqdp
3384              tmpStr += '\n Initialization phase-space points: %d'%nTotini
3385              tmpStr += '\n Reduction methods used:'
3386              red_methods = [(unit_code_meaning[i],nTot1[i]) for i in \
3387                  unit_code_meaning.keys() if nTot1[i]>0]
3388              for method, n in sorted(red_methods, key= lambda l: l[1], reverse=True):
3389                  tmpStr += '\n > %s%s%s'%(method,' '*(33-len(method)),n)
3390              if nTot100 != 0:
3391                  debug_msg += '\n Unknown return code (100): %d'%nTot100
3392              if nTot10 != 0:
3393                  debug_msg += '\n Unknown return code (10): %d'%nTot10
3394              nUnknownUnit = sum(nTot1[u] for u in range(10) if u \
3395                  not in unit_code_meaning.keys())
3396              if nUnknownUnit != 0:
3397                  debug_msg += '\n Unknown return code (1): %d'\
3398                      %nUnknownUnit
3399  
3400              if maxUPS[1]>0.001:
3401                  message += tmpStr
3402                  message += '\n Total number of unstable PS points detected:'+\
3403                      ' %d (%4.2f%%)'%(nToteps,safe_float(100*nToteps)/nTotPS)
3404                  message += '\n Maximum fraction of UPS points in '+\
3405                      'channel %s (%4.2f%%)'%maxUPS
3406                  message += '\n Please report this to the authors while '+\
3407                      'providing the file'
3408                  message += '\n %s'%str(pjoin(os.path.dirname(self.me_dir),
3409                      maxUPS[0],'UPS.log'))
3410              else:
3411                  debug_msg += tmpStr
3412  
3413  
3414          # ====================================================
3415          # == aMC@NLO virtual integration optimization stats ==
3416          # ====================================================
3417  
3418          virt_tricks_finder = re.compile(
3419              r"accumulated results Virtual ratio\s*=\s*-?(?P<v_ratio>[\d\+-Eed\.]*)"+\
3420              r"\s*\+/-\s*-?[\d\+-Eed\.]*\s*\(\s*-?(?P<v_ratio_err>[\d\+-Eed\.]*)\s*\%\)\s*\n"+\
3421              r"accumulated results ABS virtual\s*=\s*-?(?P<v_abs_contr>[\d\+-Eed\.]*)"+\
3422              r"\s*\+/-\s*-?[\d\+-Eed\.]*\s*\(\s*-?(?P<v_abs_contr_err>[\d\+-Eed\.]*)\s*\%\)")
3423  
3424          virt_frac_finder = re.compile(r"update virtual fraction to\s*:\s*"+\
3425              "-?(?P<v_frac>[\d\+-Eed\.]*)\s*")
3426  
3427          channel_contr_finder = re.compile(r"Final result \[ABS\]\s*:\s*-?(?P<v_contr>[\d\+-Eed\.]*)")
3428  
3429          channel_contr_list = {}
3430          for gv_log in log_GV_files:
3431              logfile=open(gv_log,'r')
3432              log = logfile.read()
3433              logfile.close()
3434              channel_name = '/'.join(gv_log.split('/')[-3:-1])
3435              vf_stats = None
3436              for vf_stats in re.finditer(virt_frac_finder, log):
3437                  pass
3438              if not vf_stats is None:
3439                  v_frac = safe_float(vf_stats.group('v_frac'))
3440                  ###v_average = safe_float(vf_stats.group('v_average'))
3441                  try:
3442                      if v_frac < stats['virt_stats']['v_frac_min'][0]:
3443                          stats['virt_stats']['v_frac_min']=(v_frac,channel_name)
3444                      if v_frac > stats['virt_stats']['v_frac_max'][0]:
3445                          stats['virt_stats']['v_frac_max']=(v_frac,channel_name)
3446                      stats['virt_stats']['v_frac_avg'][0] += v_frac
3447                      stats['virt_stats']['v_frac_avg'][1] += 1
3448                  except KeyError:
3449                      stats['virt_stats']['v_frac_min']=[v_frac,channel_name]
3450                      stats['virt_stats']['v_frac_max']=[v_frac,channel_name]
3451                      stats['virt_stats']['v_frac_avg']=[v_frac,1]
3452  
3453  
3454              ccontr_stats = None
3455              for ccontr_stats in re.finditer(channel_contr_finder, log):
3456                  pass
3457              if not ccontr_stats is None:
3458                  contrib = safe_float(ccontr_stats.group('v_contr'))
3459                  try:
3460                      if contrib>channel_contr_list[channel_name]:
3461                          channel_contr_list[channel_name]=contrib
3462                  except KeyError:
3463                      channel_contr_list[channel_name]=contrib
3464  
3465  
3466          # Now build the list of relevant virt log files to look for the maxima
3467          # of virt fractions and such.
3468          average_contrib = 0.0
3469          for value in channel_contr_list.values():
3470              average_contrib += value
3471          if len(channel_contr_list.values()) !=0:
3472              average_contrib = average_contrib / len(channel_contr_list.values())
3473  
3474          relevant_log_GV_files = []
3475          excluded_channels = set([])
3476          all_channels = set([])
3477          for log_file in log_GV_files:
3478              channel_name = '/'.join(log_file.split('/')[-3:-1])
3479              all_channels.add(channel_name)
3480              try:
3481                  if channel_contr_list[channel_name] > (0.1*average_contrib):
3482                      relevant_log_GV_files.append(log_file)
3483                  else:
3484                      excluded_channels.add(channel_name)
3485              except KeyError:
3486                  relevant_log_GV_files.append(log_file)
3487  
3488          # Now we want to use the latest occurrence of accumulated result in the log file
3489          for gv_log in relevant_log_GV_files:
3490              logfile=open(gv_log,'r')
3491              log = logfile.read()
3492              logfile.close()
3493              channel_name = '/'.join(gv_log.split('/')[-3:-1])
3494  
3495              vt_stats = None
3496              for vt_stats in re.finditer(virt_tricks_finder, log):
3497                  pass
3498              if not vt_stats is None:
3499                  vt_stats_group = vt_stats.groupdict()
3500                  v_ratio = safe_float(vt_stats.group('v_ratio'))
3501                  v_ratio_err = safe_float(vt_stats.group('v_ratio_err'))
3502                  v_contr = safe_float(vt_stats.group('v_abs_contr'))
3503                  v_contr_err = safe_float(vt_stats.group('v_abs_contr_err'))
3504                  try:
3505                      if v_ratio < stats['virt_stats']['v_ratio_min'][0]:
3506                          stats['virt_stats']['v_ratio_min']=(v_ratio,channel_name)
3507                      if v_ratio > stats['virt_stats']['v_ratio_max'][0]:
3508                          stats['virt_stats']['v_ratio_max']=(v_ratio,channel_name)
3509                      if v_ratio < stats['virt_stats']['v_ratio_err_min'][0]:
3510                          stats['virt_stats']['v_ratio_err_min']=(v_ratio_err,channel_name)
3511                      if v_ratio > stats['virt_stats']['v_ratio_err_max'][0]:
3512                          stats['virt_stats']['v_ratio_err_max']=(v_ratio_err,channel_name)
3513                      if v_contr < stats['virt_stats']['v_contr_min'][0]:
3514                          stats['virt_stats']['v_contr_min']=(v_contr,channel_name)
3515                      if v_contr > stats['virt_stats']['v_contr_max'][0]:
3516                          stats['virt_stats']['v_contr_max']=(v_contr,channel_name)
3517                      if v_contr_err < stats['virt_stats']['v_contr_err_min'][0]:
3518                          stats['virt_stats']['v_contr_err_min']=(v_contr_err,channel_name)
3519                      if v_contr_err > stats['virt_stats']['v_contr_err_max'][0]:
3520                          stats['virt_stats']['v_contr_err_max']=(v_contr_err,channel_name)
3521                  except KeyError:
3522                      stats['virt_stats']['v_ratio_min']=[v_ratio,channel_name]
3523                      stats['virt_stats']['v_ratio_max']=[v_ratio,channel_name]
3524                      stats['virt_stats']['v_ratio_err_min']=[v_ratio_err,channel_name]
3525                      stats['virt_stats']['v_ratio_err_max']=[v_ratio_err,channel_name]
3526                      stats['virt_stats']['v_contr_min']=[v_contr,channel_name]
3527                      stats['virt_stats']['v_contr_max']=[v_contr,channel_name]
3528                      stats['virt_stats']['v_contr_err_min']=[v_contr_err,channel_name]
3529                      stats['virt_stats']['v_contr_err_max']=[v_contr_err,channel_name]
3530  
3531              vf_stats = None
3532              for vf_stats in re.finditer(virt_frac_finder, log):
3533                  pass
3534              ##if not vf_stats is None:
3535              ##    v_frac = safe_float(vf_stats.group('v_frac'))
3536              ##    v_average = safe_float(vf_stats.group('v_average'))
3537              ##    try:
3538              ##        if v_average < stats['virt_stats']['v_average_min'][0]:
3539              ##            stats['virt_stats']['v_average_min']=(v_average,channel_name)
3540              ##        if v_average > stats['virt_stats']['v_average_max'][0]:
3541              ##            stats['virt_stats']['v_average_max']=(v_average,channel_name)
3542              ##        stats['virt_stats']['v_average_avg'][0] += v_average
3543              ##        stats['virt_stats']['v_average_avg'][1] += 1
3544              ##    except KeyError:
3545              ##        stats['virt_stats']['v_average_min']=[v_average,channel_name]
3546              ##        stats['virt_stats']['v_average_max']=[v_average,channel_name]
3547              ##        stats['virt_stats']['v_average_avg']=[v_average,1]
3548  
3549          try:
3550              debug_msg += '\n\n Statistics on virtual integration optimization : '
3551  
3552              debug_msg += '\n Maximum virt fraction computed %.3f (%s)'\
3553                  %tuple(stats['virt_stats']['v_frac_max'])
3554              debug_msg += '\n Minimum virt fraction computed %.3f (%s)'\
3555                  %tuple(stats['virt_stats']['v_frac_min'])
3556              debug_msg += '\n Average virt fraction computed %.3f'\
3557                  %safe_float(stats['virt_stats']['v_frac_avg'][0]/safe_float(stats['virt_stats']['v_frac_avg'][1]))
3558              debug_msg += '\n Stats below exclude negligible channels (%d excluded out of %d)'%\
3559                  (len(excluded_channels),len(all_channels))
3560              ##debug_msg += '\n Maximum virt ratio used %.2f (%s)'\
3561              ##    %tuple(stats['virt_stats']['v_average_max'])
3562              debug_msg += '\n Maximum virt ratio found from grids %.2f (%s)'\
3563                  %tuple(stats['virt_stats']['v_ratio_max'])
3564              tmpStr = '\n Max. MC err. on virt ratio from grids %.1f %% (%s)'\
3565                  %tuple(stats['virt_stats']['v_ratio_err_max'])
3566              debug_msg += tmpStr
3567              # After all it was decided that it is better not to alarm the user unnecessarily
3568              # with such a printout of the statistics.
3569              # if stats['virt_stats']['v_ratio_err_max'][0]>100.0 or \
3570              #    stats['virt_stats']['v_ratio_err_max'][0]>100.0:
3571              #     message += "\n Suspiciously large MC error in :"
3572              # if stats['virt_stats']['v_ratio_err_max'][0]>100.0:
3573              #     message += tmpStr
3574  
3575              tmpStr = '\n Maximum MC error on abs virt %.1f %% (%s)'\
3576                  %tuple(stats['virt_stats']['v_contr_err_max'])
3577              debug_msg += tmpStr
3578              # if stats['virt_stats']['v_contr_err_max'][0]>100.0:
3579              #     message += tmpStr
3580  
3581  
3582          except KeyError:
3583              debug_msg += '\n Could not find statistics on the integration optimization. '
3584  
3585          # =======================================
3586          # == aMC@NLO timing profile statistics ==
3587          # =======================================
3588  
3589          timing_stat_finder = re.compile(r"\s*Time spent in\s*(?P<name>\w*)\s*:\s*"+\
3590              "(?P<time>[\d\+-Eed\.]*)\s*")
3591  
3592          for logf in log_GV_files:
3593              logfile=open(logf,'r')
3594              log = logfile.read()
3595              logfile.close()
3596              channel_name = '/'.join(logf.split('/')[-3:-1])
3597              mint = re.search(mint_search,logf)
3598              if not mint is None:
3599                  channel_name = channel_name+' [step %s]'%mint.group('ID')
3600  
3601              for time_stats in re.finditer(timing_stat_finder, log):
3602                  try:
3603                      stats['timings'][time_stats.group('name')][channel_name]+=\
3604                          safe_float(time_stats.group('time'))
3605                  except KeyError:
3606                      if time_stats.group('name') not in stats['timings'].keys():
3607                          stats['timings'][time_stats.group('name')] = {}
3608                      stats['timings'][time_stats.group('name')][channel_name]=\
3609                          safe_float(time_stats.group('time'))
3610  
3611          # useful inline function
3612          Tstr = lambda secs: str(datetime.timedelta(seconds=int(secs)))
3613          try:
3614              totTimeList = [(time, chan) for chan, time in \
3615                  stats['timings']['Total'].items()]
3616          except KeyError:
3617              totTimeList = []
3618  
3619          totTimeList.sort()
3620          if len(totTimeList)>0:
3621              debug_msg += '\n\n Inclusive timing profile :'
3622              debug_msg += '\n Overall slowest channel %s (%s)'%\
3623                  (Tstr(totTimeList[-1][0]),totTimeList[-1][1])
3624              debug_msg += '\n Average channel running time %s'%\
3625                  Tstr(sum([el[0] for el in totTimeList])/len(totTimeList))
3626              debug_msg += '\n Aggregated total running time %s'%\
3627                  Tstr(sum([el[0] for el in totTimeList]))
3628          else:
3629              debug_msg += '\n\n Inclusive timing profile not available.'
3630  
3631          sorted_keys = sorted(stats['timings'].keys(), key= lambda stat: \
3632              sum(stats['timings'][stat].values()), reverse=True)
3633          for name in sorted_keys:
3634              if name=='Total':
3635                  continue
3636              if sum(stats['timings'][name].values())<=0.0:
3637                  debug_msg += '\n Zero time record for %s.'%name
3638                  continue
3639              try:
3640                  TimeList = [((100.0*time/stats['timings']['Total'][chan]),
3641                      chan) for chan, time in stats['timings'][name].items()]
3642              except (KeyError, ZeroDivisionError):
3643                  debug_msg += '\n\n Timing profile for %s unavailable.'%name
3644                  continue
3645              TimeList.sort()
3646              debug_msg += '\n Timing profile for <%s> :'%name
3647              try:
3648                  debug_msg += '\n Overall fraction of time %.3f %%'%\
3649                      safe_float((100.0*(sum(stats['timings'][name].values())/
3650                      sum(stats['timings']['Total'].values()))))
3651              except (KeyError, ZeroDivisionError):
3652                  debug_msg += '\n Overall fraction of time unavailable.'
3653              debug_msg += '\n Largest fraction of time %.3f %% (%s)'%\
3654                  (TimeList[-1][0],TimeList[-1][1])
3655              debug_msg += '\n Smallest fraction of time %.3f %% (%s)'%\
3656                  (TimeList[0][0],TimeList[0][1])
3657  
3658          # ==============================
3659          # == log file error detection ==
3660          # ==============================
3661  
3662          # Find the number of potential errors found in all log files
3663          # This re is a simple match on a case-insensitive 'error', but a veto
3664          # is added to exclude the sentence
3665          # "See Section 6 of paper for error calculation."
3666          # which appears in the header of LHAPDF in the logs.
3667          err_finder = re.compile(\
3668              r"(?<!of\spaper\sfor\s)\bERROR\b(?!\scalculation\.)",re.IGNORECASE)
3669          for log in all_log_files:
3670              logfile=open(log,'r')
3671              nErrors = len(re.findall(err_finder, logfile.read()))
3672              logfile.close()
3673              if nErrors != 0:
3674                  stats['Errors'].append((str(log),nErrors))
3675  
3676          nErrors = sum([err[1] for err in stats['Errors']],0)
3677          if nErrors != 0:
3678              debug_msg += '\n WARNING:: A total of %d error%s ha%s been '\
3679                  %(nErrors,'s' if nErrors>1 else '','ve' if nErrors>1 else 's')+\
3680                  'found in the following log file%s:'%('s' if \
3681                  len(stats['Errors'])>1 else '')
3682              for error in stats['Errors'][:3]:
3683                  log_name = '/'.join(error[0].split('/')[-5:])
3684                  debug_msg += '\n > %d error%s in %s'%\
3685                      (error[1],'s' if error[1]>1 else '',log_name)
3686              if len(stats['Errors'])>3:
3687                  nRemainingErrors = sum([err[1] for err in stats['Errors']][3:],0)
3688                  nRemainingLogs = len(stats['Errors'])-3
3689                  debug_msg += '\n And another %d error%s in %d other log file%s'%\
3690                      (nRemainingErrors, 's' if nRemainingErrors>1 else '',
3691                       nRemainingLogs, 's ' if nRemainingLogs>1 else '')
3692  
3693          return message, debug_msg
3694  
3695  
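# Illustrative sketch, not part of the module: the 'error' veto above in
# action on two hypothetical log lines.
import re

err_finder = re.compile(
    r"(?<!of\spaper\sfor\s)\bERROR\b(?!\scalculation\.)", re.IGNORECASE)
n1 = len(re.findall(err_finder, "ERROR: negative weight"))                       # 1
n2 = len(re.findall(err_finder, "See Section 6 of paper for error calculation."))  # 0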
3696 - def reweight_and_collect_events(self, options, mode, nevents, event_norm):
3697 """this function calls the reweighting routines and creates the event file in the 3698 Event dir. Return the name of the event file created 3699 """ 3700 scale_pdf_info=[] 3701 if any(self.run_card['reweight_scale']) or any(self.run_card['reweight_PDF']) or \ 3702 len(self.run_card['dynamical_scale_choice']) > 1 or len(self.run_card['lhaid']) > 1: 3703 scale_pdf_info = self.run_reweight(options['reweightonly']) 3704 self.update_status('Collecting events', level='parton', update_results=True) 3705 misc.compile(['collect_events'], 3706 cwd=pjoin(self.me_dir, 'SubProcesses'), nocompile=options['nocompile']) 3707 p = misc.Popen(['./collect_events'], cwd=pjoin(self.me_dir, 'SubProcesses'), 3708 stdin=subprocess.PIPE, 3709 stdout=open(pjoin(self.me_dir, 'collect_events.log'), 'w')) 3710 if event_norm.lower() == 'sum': 3711 p.communicate(input = '1\n') 3712 elif event_norm.lower() == 'unity': 3713 p.communicate(input = '3\n') 3714 elif event_norm.lower() == 'bias': 3715 p.communicate(input = '0\n') 3716 else: 3717 p.communicate(input = '2\n') 3718 3719 #get filename from collect events 3720 filename = open(pjoin(self.me_dir, 'collect_events.log')).read().split()[-1] 3721 3722 if not os.path.exists(pjoin(self.me_dir, 'SubProcesses', filename)): 3723 raise aMCatNLOError('An error occurred during event generation. ' + \ 3724 'The event file has not been created. Check collect_events.log') 3725 evt_file = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz') 3726 misc.gzip(pjoin(self.me_dir, 'SubProcesses', filename), stdout=evt_file) 3727 if not options['reweightonly']: 3728 self.print_summary(options, 2, mode, scale_pdf_info) 3729 res_files = misc.glob('res*.txt', pjoin(self.me_dir, 'SubProcesses')) 3730 for res_file in res_files: 3731 files.mv(res_file,pjoin(self.me_dir, 'Events', self.run_name)) 3732 3733 logger.info('The %s file has been generated.\n' % (evt_file)) 3734 self.results.add_detail('nb_event', nevents) 3735 self.update_status('Events generated', level='parton', update_results=True) 3736 return evt_file[:-3]
3737  
3738  
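# Illustrative sketch, not part of the module: the stdin menu choice fed to
# ./collect_events above, keyed by the run_card 'event_norm' setting.
norm_choice = {'sum': '1\n', 'unity': '3\n', 'bias': '0\n'}
event_norm = 'average'                                     # hypothetical setting
stdin_input = norm_choice.get(event_norm.lower(), '2\n')   # '2\n' = average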
3739 - def run_mcatnlo(self, evt_file, options):
3740 """runs mcatnlo on the generated event file, to produce showered-events 3741 """ 3742 logger.info('Preparing MCatNLO run') 3743 try: 3744 misc.gunzip(evt_file) 3745 except Exception: 3746 pass 3747 3748 self.banner = banner_mod.Banner(evt_file) 3749 shower = self.banner.get_detail('run_card', 'parton_shower').upper() 3750 3751 #check that the number of split event files divides the number of 3752 # events, otherwise set it to 1 3753 if int(self.banner.get_detail('run_card', 'nevents') / \ 3754 self.shower_card['nsplit_jobs']) * self.shower_card['nsplit_jobs'] \ 3755 != self.banner.get_detail('run_card', 'nevents'): 3756 logger.warning(\ 3757 'nsplit_jobs in the shower card is not a divisor of the number of events.\n' + \ 3758 'Setting it to 1.') 3759 self.shower_card['nsplit_jobs'] = 1 3760 3761 # don't split jobs if the user asks to shower only a part of the events 3762 if self.shower_card['nevents'] > 0 and \ 3763 self.shower_card['nevents'] < self.banner.get_detail('run_card', 'nevents') and \ 3764 self.shower_card['nsplit_jobs'] != 1: 3765 logger.warning(\ 3766 'Only a part of the events will be showered.\n' + \ 3767 'Setting nsplit_jobs in the shower_card to 1.') 3768 self.shower_card['nsplit_jobs'] = 1 3769 3770 self.banner_to_mcatnlo(evt_file) 3771 3772 # if fastjet has to be linked (in extralibs) then 3773 # add lib /include dirs for fastjet if fastjet-config is present on the 3774 # system, otherwise add fjcore to the files to combine 3775 if 'fastjet' in self.shower_card['extralibs']: 3776 #first, check that stdc++ is also linked 3777 if not 'stdc++' in self.shower_card['extralibs']: 3778 logger.warning('Linking FastJet: adding stdc++ to EXTRALIBS') 3779 self.shower_card['extralibs'] += ' stdc++' 3780 # then check if options[fastjet] corresponds to a valid fj installation 3781 try: 3782 #this is for a complete fj installation 3783 p = subprocess.Popen([self.options['fastjet'], '--prefix'], \ 3784 stdout=subprocess.PIPE, stderr=subprocess.PIPE) 3785 output, error = p.communicate() 3786 #remove the line break from output (last character) 3787 output = output[:-1] 3788 # add lib/include paths 3789 if not pjoin(output, 'lib') in self.shower_card['extrapaths']: 3790 logger.warning('Linking FastJet: updating EXTRAPATHS') 3791 self.shower_card['extrapaths'] += ' ' + pjoin(output, 'lib') 3792 if not pjoin(output, 'include') in self.shower_card['includepaths']: 3793 logger.warning('Linking FastJet: updating INCLUDEPATHS') 3794 self.shower_card['includepaths'] += ' ' + pjoin(output, 'include') 3795 # to be changed in the fortran wrapper 3796 include_line = '#include "fastjet/ClusterSequence.hh"//INCLUDE_FJ' 3797 namespace_line = 'namespace fj = fastjet;//NAMESPACE_FJ' 3798 except Exception: 3799 logger.warning('Linking FastJet: using fjcore') 3800 # this is for FJcore, so no FJ library has to be linked 3801 self.shower_card['extralibs'] = self.shower_card['extralibs'].replace('fastjet', '') 3802 if not 'fjcore.o' in self.shower_card['analyse']: 3803 self.shower_card['analyse'] += ' fjcore.o' 3804 # to be changed in the fortran wrapper 3805 include_line = '#include "fjcore.hh"//INCLUDE_FJ' 3806 namespace_line = 'namespace fj = fjcore;//NAMESPACE_FJ' 3807 # change the fortran wrapper with the correct namespaces/include 3808 fjwrapper_lines = open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc')).read().split('\n') 3809 for line in fjwrapper_lines: 3810 if '//INCLUDE_FJ' in line: 3811 fjwrapper_lines[fjwrapper_lines.index(line)] = include_line 3812 if '//NAMESPACE_FJ' in 
line: 3813 fjwrapper_lines[fjwrapper_lines.index(line)] = namespace_line 3814 with open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc'), 'w') as fsock: 3815 fsock.write('\n'.join(fjwrapper_lines) + '\n') 3816 3817 extrapaths = self.shower_card['extrapaths'].split() 3818 3819 # check that the path needed by HW++ and PY8 are set if one uses these shower 3820 if shower in ['HERWIGPP', 'PYTHIA8']: 3821 path_dict = {'HERWIGPP': ['hepmc_path', 3822 'thepeg_path', 3823 'hwpp_path'], 3824 'PYTHIA8': ['pythia8_path']} 3825 3826 if not all([self.options[ppath] and os.path.exists(self.options[ppath]) for ppath in path_dict[shower]]): 3827 raise aMCatNLOError('Some paths are missing or invalid in the configuration file.\n' + \ 3828 ('Please make sure you have set these variables: %s' % ', '.join(path_dict[shower]))) 3829 3830 if shower == 'HERWIGPP': 3831 extrapaths.append(pjoin(self.options['hepmc_path'], 'lib')) 3832 self.shower_card['extrapaths'] += ' %s' % pjoin(self.options['hepmc_path'], 'lib') 3833 3834 # add the HEPMC path of the pythia8 installation 3835 if shower == 'PYTHIA8': 3836 hepmc = subprocess.Popen([pjoin(self.options['pythia8_path'], 'bin', 'pythia8-config'), '--hepmc2'], 3837 stdout = subprocess.PIPE).stdout.read().strip() 3838 #this gives all the flags, i.e. 3839 #-I/Path/to/HepMC/include -L/Path/to/HepMC/lib -lHepMC 3840 # we just need the path to the HepMC libraries 3841 extrapaths.append(hepmc.split()[1].replace('-L', '')) 3842 3843 if shower == 'PYTHIA8' and not os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): 3844 extrapaths.append(pjoin(self.options['pythia8_path'], 'lib')) 3845 3846 # set the PATH for the dynamic libraries 3847 if sys.platform == 'darwin': 3848 ld_library_path = 'DYLD_LIBRARY_PATH' 3849 else: 3850 ld_library_path = 'LD_LIBRARY_PATH' 3851 if ld_library_path in os.environ.keys(): 3852 paths = os.environ[ld_library_path] 3853 else: 3854 paths = '' 3855 paths += ':' + ':'.join(extrapaths) 3856 os.putenv(ld_library_path, paths) 3857 3858 shower_card_path = pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat') 3859 self.shower_card.write_card(shower, shower_card_path) 3860 3861 # overwrite if shower_card_set.dat exists in MCatNLO 3862 if os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat')): 3863 files.mv(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat'), 3864 pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat')) 3865 3866 mcatnlo_log = pjoin(self.me_dir, 'mcatnlo.log') 3867 self.update_status('Compiling MCatNLO for %s...' 
% shower, level='shower') 3868 3869 3870 # libdl may be needded for pythia 82xx 3871 #if shower == 'PYTHIA8' and not \ 3872 # os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')) and \ 3873 # 'dl' not in self.shower_card['extralibs'].split(): 3874 # # 'dl' has to be linked with the extralibs 3875 # self.shower_card['extralibs'] += ' dl' 3876 # logger.warning("'dl' was added to extralibs from the shower_card.dat.\n" + \ 3877 # "It is needed for the correct running of PY8.2xx.\n" + \ 3878 # "If this library cannot be found on your system, a crash will occur.") 3879 3880 misc.call(['./MCatNLO_MadFKS.inputs'], stdout=open(mcatnlo_log, 'w'), 3881 stderr=open(mcatnlo_log, 'w'), 3882 cwd=pjoin(self.me_dir, 'MCatNLO'), 3883 close_fds=True) 3884 3885 exe = 'MCATNLO_%s_EXE' % shower 3886 if not os.path.exists(pjoin(self.me_dir, 'MCatNLO', exe)) and \ 3887 not os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe')): 3888 print open(mcatnlo_log).read() 3889 raise aMCatNLOError('Compilation failed, check %s for details' % mcatnlo_log) 3890 logger.info(' ... done') 3891 3892 # create an empty dir where to run 3893 count = 1 3894 while os.path.isdir(pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' % \ 3895 (shower, count))): 3896 count += 1 3897 rundir = pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' % \ 3898 (shower, count)) 3899 os.mkdir(rundir) 3900 files.cp(shower_card_path, rundir) 3901 3902 #look for the event files (don't resplit if one asks for the 3903 # same number of event files as in the previous run) 3904 event_files = misc.glob('events_*.lhe', pjoin(self.me_dir, 'Events', self.run_name)) 3905 if max(len(event_files), 1) != self.shower_card['nsplit_jobs']: 3906 logger.info('Cleaning old files and splitting the event file...') 3907 #clean the old files 3908 files.rm([f for f in event_files if 'events.lhe' not in f]) 3909 if self.shower_card['nsplit_jobs'] > 1: 3910 misc.compile(['split_events'], cwd = pjoin(self.me_dir, 'Utilities'), nocompile=options['nocompile']) 3911 p = misc.Popen([pjoin(self.me_dir, 'Utilities', 'split_events')], 3912 stdin=subprocess.PIPE, 3913 stdout=open(pjoin(self.me_dir, 'Events', self.run_name, 'split_events.log'), 'w'), 3914 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 3915 p.communicate(input = 'events.lhe\n%d\n' % self.shower_card['nsplit_jobs']) 3916 logger.info('Splitting done.') 3917 event_files = misc.glob('events_*.lhe', pjoin(self.me_dir, 'Events', self.run_name)) 3918 3919 event_files.sort() 3920 3921 self.update_status('Showering events...', level='shower') 3922 logger.info('(Running in %s)' % rundir) 3923 if shower != 'PYTHIA8': 3924 files.mv(pjoin(self.me_dir, 'MCatNLO', exe), rundir) 3925 files.mv(pjoin(self.me_dir, 'MCatNLO', 'MCATNLO_%s_input' % shower), rundir) 3926 else: 3927 # special treatment for pythia8 3928 files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.cmd'), rundir) 3929 files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe'), rundir) 3930 if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): # this is PY8.1xxx 3931 files.ln(pjoin(self.options['pythia8_path'], 'examples', 'config.sh'), rundir) 3932 files.ln(pjoin(self.options['pythia8_path'], 'xmldoc'), rundir) 3933 else: # this is PY8.2xxx 3934 files.ln(pjoin(self.options['pythia8_path'], 'share/Pythia8/xmldoc'), rundir) 3935 #link the hwpp exe in the rundir 3936 if shower == 'HERWIGPP': 3937 try: 3938 if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++')): 3939 files.ln(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++'), rundir) 3940 if 
os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig')): 3941 files.ln(pjoin(self.options['hwpp_path'], 'bin', 'Herwig'), rundir) 3942 except Exception: 3943 raise aMCatNLOError('The Herwig++ path set in the configuration file is not valid.') 3944 3945 if os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'HWPPAnalyzer', 'HepMCFortran.so')): 3946 files.cp(pjoin(self.me_dir, 'MCatNLO', 'HWPPAnalyzer', 'HepMCFortran.so'), rundir) 3947 3948 files.ln(evt_file, rundir, 'events.lhe') 3949 for i, f in enumerate(event_files): 3950 files.ln(f, rundir,'events_%d.lhe' % (i + 1)) 3951 3952 if not self.shower_card['analyse']: 3953 # an hep/hepmc file as output 3954 out_id = 'HEP' 3955 else: 3956 # one or more .top file(s) as output 3957 if "HwU" in self.shower_card['analyse']: 3958 out_id = 'HWU' 3959 else: 3960 out_id = 'TOP' 3961 3962 # write the executable 3963 with open(pjoin(rundir, 'shower.sh'), 'w') as fsock: 3964 # set the PATH for the dynamic libraries 3965 if sys.platform == 'darwin': 3966 ld_library_path = 'DYLD_LIBRARY_PATH' 3967 else: 3968 ld_library_path = 'LD_LIBRARY_PATH' 3969 fsock.write(open(pjoin(self.me_dir, 'MCatNLO', 'shower_template.sh')).read() \ 3970 % {'ld_library_path': ld_library_path, 3971 'extralibs': ':'.join(extrapaths)}) 3972 subprocess.call(['chmod', '+x', pjoin(rundir, 'shower.sh')]) 3973 3974 if event_files: 3975 arg_list = [[shower, out_id, self.run_name, '%d' % (i + 1)] \ 3976 for i in range(len(event_files))] 3977 else: 3978 arg_list = [[shower, out_id, self.run_name]] 3979 3980 self.run_all({rundir: 'shower.sh'}, arg_list, 'shower') 3981 self.njobs = 1 3982 self.wait_for_complete('shower') 3983 3984 # now collect the results 3985 message = '' 3986 warning = '' 3987 to_gzip = [evt_file] 3988 if out_id == 'HEP': 3989 #copy the showered stdhep/hepmc file back in events 3990 if shower in ['PYTHIA8', 'HERWIGPP']: 3991 hep_format = 'HEPMC' 3992 ext = 'hepmc' 3993 else: 3994 hep_format = 'StdHEP' 3995 ext = 'hep' 3996 3997 hep_file = '%s_%s_0.%s.gz' % \ 3998 (pjoin(os.path.dirname(evt_file), 'events'), shower, ext) 3999 count = 0 4000 4001 # find the first available name for the output: 4002 # check existing results with or without event splitting 4003 while os.path.exists(hep_file) or \ 4004 os.path.exists(hep_file.replace('.%s.gz' % ext, '__1.%s.gz' % ext)) : 4005 count +=1 4006 hep_file = '%s_%s_%d.%s.gz' % \ 4007 (pjoin(os.path.dirname(evt_file), 'events'), shower, count, ext) 4008 4009 try: 4010 if self.shower_card['nsplit_jobs'] == 1: 4011 files.mv(os.path.join(rundir, 'events.%s.gz' % ext), hep_file) 4012 message = ('The file %s has been generated. 
\nIt contains showered' + \ 4013 ' and hadronized events in the %s format obtained by' + \ 4014 ' showering the parton-level event file %s.gz with %s') % \ 4015 (hep_file, hep_format, evt_file, shower) 4016 else: 4017 hep_list = [] 4018 for i in range(self.shower_card['nsplit_jobs']): 4019 hep_list.append(hep_file.replace('.%s.gz' % ext, '__%d.%s.gz' % (i + 1, ext))) 4020 files.mv(os.path.join(rundir, 'events_%d.%s.gz' % (i + 1, ext)), hep_list[-1]) 4021 message = ('The following files have been generated:\n %s\nThey contain showered' + \ 4022 ' and hadronized events in the %s format obtained by' + \ 4023 ' showering the (split) parton-level event file %s.gz with %s') % \ 4024 ('\n '.join(hep_list), hep_format, evt_file, shower) 4025 4026 except OSError, IOError: 4027 raise aMCatNLOError('No file has been generated, an error occurred.'+\ 4028 ' More information in %s' % pjoin(os.getcwd(), 'amcatnlo_run.log')) 4029 4030 # run the plot creation in a secure way 4031 if hep_format == 'StdHEP': 4032 try: 4033 self.do_plot('%s -f' % self.run_name) 4034 except Exception, error: 4035 logger.info("Fail to make the plot. Continue...") 4036 pass 4037 4038 elif out_id == 'TOP' or out_id == 'HWU': 4039 #copy the topdrawer or HwU file(s) back in events 4040 if out_id=='TOP': 4041 ext='top' 4042 elif out_id=='HWU': 4043 ext='HwU' 4044 topfiles = [] 4045 top_tars = [tarfile.TarFile(f) for f in misc.glob('histfile*.tar', rundir)] 4046 for top_tar in top_tars: 4047 topfiles.extend(top_tar.getnames()) 4048 4049 # safety check 4050 if len(top_tars) != self.shower_card['nsplit_jobs']: 4051 raise aMCatNLOError('%d job(s) expected, %d file(s) found' % \ 4052 (self.shower_card['nsplit_jobs'], len(top_tars))) 4053 4054 # find the first available name for the output: 4055 # check existing results with or without event splitting 4056 filename = 'plot_%s_%d_' % (shower, 1) 4057 count = 1 4058 while os.path.exists(pjoin(self.me_dir, 'Events', 4059 self.run_name, '%s0.%s' % (filename,ext))) or \ 4060 os.path.exists(pjoin(self.me_dir, 'Events', 4061 self.run_name, '%s0__1.%s' % (filename,ext))): 4062 count += 1 4063 filename = 'plot_%s_%d_' % (shower, count) 4064 4065 if out_id=='TOP': 4066 hist_format='TopDrawer format' 4067 elif out_id=='HWU': 4068 hist_format='HwU and GnuPlot formats' 4069 4070 if not topfiles: 4071 # if no topfiles are found just warn the user 4072 warning = 'No .top file has been generated. 
For the results of your ' +\ 4073 'run, please check inside %s' % rundir 4074 elif self.shower_card['nsplit_jobs'] == 1: 4075 # only one job for the shower 4076 top_tars[0].extractall(path = rundir) 4077 plotfiles = [] 4078 for i, file in enumerate(topfiles): 4079 if out_id=='TOP': 4080 plotfile = pjoin(self.me_dir, 'Events', self.run_name, 4081 '%s%d.top' % (filename, i)) 4082 files.mv(pjoin(rundir, file), plotfile) 4083 elif out_id=='HWU': 4084 out=pjoin(self.me_dir,'Events', 4085 self.run_name,'%s%d'% (filename,i)) 4086 histos=[{'dirname':pjoin(rundir,file)}] 4087 self.combine_plots_HwU(histos,out) 4088 try: 4089 misc.call(['gnuplot','%s%d.gnuplot' % (filename,i)],\ 4090 stdout=os.open(os.devnull, os.O_RDWR),\ 4091 stderr=os.open(os.devnull, os.O_RDWR),\ 4092 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 4093 except Exception: 4094 pass 4095 plotfile=pjoin(self.me_dir,'Events',self.run_name, 4096 '%s%d.HwU'% (filename,i)) 4097 plotfiles.append(plotfile) 4098 4099 ffiles = 'files' 4100 have = 'have' 4101 if len(plotfiles) == 1: 4102 ffiles = 'file' 4103 have = 'has' 4104 4105 message = ('The %s %s %s been generated, with histograms in the' + \ 4106 ' %s, obtained by showering the parton-level' + \ 4107 ' file %s.gz with %s.') % (ffiles, ', '.join(plotfiles), have, \ 4108 hist_format, evt_file, shower) 4109 else: 4110 # many jobs for the shower have been run 4111 topfiles_set = set(topfiles) 4112 plotfiles = [] 4113 for j, top_tar in enumerate(top_tars): 4114 top_tar.extractall(path = rundir) 4115 for i, file in enumerate(topfiles_set): 4116 plotfile = pjoin(self.me_dir, 'Events', self.run_name, 4117 '%s%d__%d.%s' % (filename, i, j + 1,ext)) 4118 files.mv(pjoin(rundir, file), plotfile) 4119 plotfiles.append(plotfile) 4120 4121 # check if the user asked to combine the .top into a single file 4122 if self.shower_card['combine_td']: 4123 misc.compile(['sum_plots'], cwd = pjoin(self.me_dir, 'Utilities')) 4124 4125 if self.banner.get('run_card', 'event_norm').lower() == 'sum': 4126 norm = 1. 
4127 else: 4128 norm = 1./float(self.shower_card['nsplit_jobs']) 4129 4130 plotfiles2 = [] 4131 for i, file in enumerate(topfiles_set): 4132 filelist = ['%s%d__%d.%s' % (filename, i, j + 1,ext) \ 4133 for j in range(self.shower_card['nsplit_jobs'])] 4134 if out_id=='TOP': 4135 infile="%d\n%s\n%s\n" % \ 4136 (self.shower_card['nsplit_jobs'], 4137 '\n'.join(filelist), 4138 '\n'.join([str(norm)] * self.shower_card['nsplit_jobs'])) 4139 p = misc.Popen([pjoin(self.me_dir, 'Utilities', 'sum_plots')], 4140 stdin=subprocess.PIPE, 4141 stdout=os.open(os.devnull, os.O_RDWR), 4142 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 4143 p.communicate(input = infile) 4144 files.mv(pjoin(self.me_dir, 'Events', self.run_name, 'sum.top'), 4145 pjoin(self.me_dir, 'Events', self.run_name, '%s%d.top' % (filename, i))) 4146 elif out_id=='HWU': 4147 out=pjoin(self.me_dir,'Events', 4148 self.run_name,'%s%d'% (filename,i)) 4149 histos=[] 4150 norms=[] 4151 for plotfile in plotfiles: 4152 histos.append({'dirname':plotfile}) 4153 norms.append(norm) 4154 self.combine_plots_HwU(histos,out,normalisation=norms) 4155 try: 4156 misc.call(['gnuplot','%s%d.gnuplot' % (filename, i)],\ 4157 stdout=os.open(os.devnull, os.O_RDWR),\ 4158 stderr=os.open(os.devnull, os.O_RDWR),\ 4159 cwd=pjoin(self.me_dir, 'Events',self.run_name)) 4160 except Exception: 4161 pass 4162 4163 plotfiles2.append(pjoin(self.me_dir, 'Events', self.run_name, '%s%d.%s' % (filename, i,ext))) 4164 tar = tarfile.open( 4165 pjoin(self.me_dir, 'Events', self.run_name, '%s%d.tar.gz' % (filename, i)), 'w:gz') 4166 for f in filelist: 4167 tar.add(pjoin(self.me_dir, 'Events', self.run_name, f), arcname=f) 4168 files.rm([pjoin(self.me_dir, 'Events', self.run_name, f) for f in filelist]) 4169 4170 tar.close() 4171 4172 ffiles = 'files' 4173 have = 'have' 4174 if len(plotfiles2) == 1: 4175 ffiles = 'file' 4176 have = 'has' 4177 4178 message = ('The %s %s %s been generated, with histograms in the' + \ 4179 ' %s, obtained by showering the parton-level' + \ 4180 ' file %s.gz with %s.\n' + \ 4181 'The files from the different shower ' + \ 4182 'jobs (before combining them) can be found inside %s.') % \ 4183 (ffiles, ', '.join(plotfiles2), have, hist_format,\ 4184 evt_file, shower, 4185 ', '.join([f.replace('%s' % ext, 'tar.gz') for f in plotfiles2])) 4186 4187 else: 4188 message = ('The following files have been generated:\n %s\n' + \ 4189 'They contain histograms in the' + \ 4190 ' %s, obtained by showering the parton-level' + \ 4191 ' file %s.gz with %s.') % ('\n '.join(plotfiles), \ 4192 hist_format, evt_file, shower) 4193 4194 # Now arxiv the shower card used if RunMaterial is present 4195 run_dir_path = pjoin(rundir, self.run_name) 4196 if os.path.exists(pjoin(run_dir_path,'RunMaterial.tar.gz')): 4197 misc.call(['tar','-xzpf','RunMaterial.tar.gz'],cwd=run_dir_path) 4198 files.cp(pjoin(self.me_dir,'Cards','shower_card.dat'), 4199 pjoin(run_dir_path,'RunMaterial','shower_card_for_%s_%d.dat'\ 4200 %(shower, count))) 4201 misc.call(['tar','-czpf','RunMaterial.tar.gz','RunMaterial'], 4202 cwd=run_dir_path) 4203 shutil.rmtree(pjoin(run_dir_path,'RunMaterial')) 4204 # end of the run, gzip files and print out the message/warning 4205 for f in to_gzip: 4206 misc.gzip(f) 4207 if message: 4208 logger.info(message) 4209 if warning: 4210 logger.warning(warning) 4211 4212 self.update_status('Run complete', level='shower', update_results=True)
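# Illustrative sketch, not part of the module: the divisor check performed at
# the start of run_mcatnlo above, with hypothetical numbers.
nevents, nsplit_jobs = 10000, 3
if int(nevents / nsplit_jobs) * nsplit_jobs != nevents:
    nsplit_jobs = 1   # 3333*3 = 9999 != 10000 -> fall back to a single job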
4213  
4214      ############################################################################
4215 - def set_run_name(self, name, tag=None, level='parton', reload_card=False):
4216 """define the run name, the run_tag, the banner and the results.""" 4217 4218 # when are we force to change the tag new_run:previous run requiring changes 4219 upgrade_tag = {'parton': ['parton','delphes','shower','madanalysis5_hadron'], 4220 'shower': ['shower','delphes','madanalysis5_hadron'], 4221 'delphes':['delphes'], 4222 'madanalysis5_hadron':['madanalysis5_hadron'], 4223 'plot':[]} 4224 4225 if name == self.run_name: 4226 if reload_card: 4227 run_card = pjoin(self.me_dir, 'Cards','run_card.dat') 4228 self.run_card = banner_mod.RunCardNLO(run_card) 4229 4230 #check if we need to change the tag 4231 if tag: 4232 self.run_card['run_tag'] = tag 4233 self.run_tag = tag 4234 self.results.add_run(self.run_name, self.run_card) 4235 else: 4236 for tag in upgrade_tag[level]: 4237 if getattr(self.results[self.run_name][-1], tag): 4238 tag = self.get_available_tag() 4239 self.run_card['run_tag'] = tag 4240 self.run_tag = tag 4241 self.results.add_run(self.run_name, self.run_card) 4242 break 4243 return # Nothing to do anymore 4244 4245 # save/clean previous run 4246 if self.run_name: 4247 self.store_result() 4248 # store new name 4249 self.run_name = name 4250 4251 # Read run_card 4252 run_card = pjoin(self.me_dir, 'Cards','run_card.dat') 4253 self.run_card = banner_mod.RunCardNLO(run_card) 4254 4255 new_tag = False 4256 # First call for this run -> set the banner 4257 self.banner = banner_mod.recover_banner(self.results, level, self.run_name, tag) 4258 if 'mgruncard' in self.banner: 4259 self.run_card = self.banner.charge_card('run_card') 4260 if tag: 4261 self.run_card['run_tag'] = tag 4262 new_tag = True 4263 elif not self.run_name in self.results and level =='parton': 4264 pass # No results yet, so current tag is fine 4265 elif not self.run_name in self.results: 4266 #This is only for case when you want to trick the interface 4267 logger.warning('Trying to run data on unknown run.') 4268 self.results.add_run(name, self.run_card) 4269 self.results.update('add run %s' % name, 'all', makehtml=True) 4270 else: 4271 for tag in upgrade_tag[level]: 4272 4273 if getattr(self.results[self.run_name][-1], tag): 4274 # LEVEL is already define in the last tag -> need to switch tag 4275 tag = self.get_available_tag() 4276 self.run_card['run_tag'] = tag 4277 new_tag = True 4278 break 4279 if not new_tag: 4280 # We can add the results to the current run 4281 tag = self.results[self.run_name][-1]['tag'] 4282 self.run_card['run_tag'] = tag # ensure that run_tag is correct 4283 4284 4285 if name in self.results and not new_tag: 4286 self.results.def_current(self.run_name) 4287 else: 4288 self.results.add_run(self.run_name, self.run_card) 4289 4290 self.run_tag = self.run_card['run_tag'] 4291 4292 # Return the tag of the previous run having the required data for this 4293 # tag/run to working wel. 4294 if level == 'parton': 4295 return 4296 elif level == 'pythia': 4297 return self.results[self.run_name][0]['tag'] 4298 else: 4299 for i in range(-1,-len(self.results[self.run_name])-1,-1): 4300 tagRun = self.results[self.run_name][i] 4301 if tagRun.pythia: 4302 return tagRun['tag']
4303  
4304  
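# Illustrative sketch, not part of the module: how the upgrade_tag table above
# decides whether a new tag is needed; the attribute values are hypothetical.
upgrade_tag = {'shower': ['shower', 'delphes', 'madanalysis5_hadron']}
last_run = {'shower': True, 'delphes': False, 'madanalysis5_hadron': False}
needs_new_tag = any(last_run[t] for t in upgrade_tag['shower'])  # True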
4305 - def store_result(self):
4306 """ tar the pythia results. This is done when we are quite sure that 4307 the pythia output will not be use anymore """ 4308 4309 if not self.run_name: 4310 return 4311 4312 self.results.save() 4313 4314 if not self.to_store: 4315 return 4316 4317 if 'event' in self.to_store: 4318 if os.path.exists(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe')): 4319 if not os.path.exists(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe.gz')): 4320 self.update_status('gzipping output file: events.lhe', level='parton', error=True) 4321 misc.gzip(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe')) 4322 else: 4323 os.remove(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe')) 4324 if os.path.exists(pjoin(self.me_dir,'Events','reweight.lhe')): 4325 os.remove(pjoin(self.me_dir,'Events', 'reweight.lhe')) 4326 4327 4328 tag = self.run_card['run_tag'] 4329 4330 self.to_store = []
4331  
4332  
4333      ############################################################################
4334 - def get_Gdir(self, Pdir=None):
4335 """get the list of Gdirectory if not yet saved.""" 4336 4337 if hasattr(self, "Gdirs"): 4338 if self.me_dir in self.Gdirs: 4339 if Pdir is None: 4340 return sum(self.Gdirs.values()) 4341 else: 4342 return self.Gdirs[Pdir] 4343 4344 Pdirs = self.get_Pdir() 4345 Gdirs = {self.me_dir:[]} 4346 for P in Pdirs: 4347 Gdirs[P] = [pjoin(P,G) for G in os.listdir(P) if G.startswith('G') and 4348 os.path.isdir(pjoin(P,G))] 4349 4350 self.Gdirs = Gdirs 4351 return self.getGdir(Pdir)
4352  
4353  
4354 - def get_init_dict(self, evt_file):
4355 """reads the info in the init block and returns them in a dictionary""" 4356 ev_file = open(evt_file) 4357 init = "" 4358 found = False 4359 while True: 4360 line = ev_file.readline() 4361 if "<init>" in line: 4362 found = True 4363 elif found and not line.startswith('#'): 4364 init += line 4365 if "</init>" in line or "<event>" in line: 4366 break 4367 ev_file.close() 4368 4369 # IDBMUP(1),IDBMUP(2),EBMUP(1),EBMUP(2), PDFGUP(1),PDFGUP(2), 4370 # PDFSUP(1),PDFSUP(2),IDWTUP,NPRUP 4371 # these are not included (so far) in the init_dict 4372 # XSECUP(1),XERRUP(1),XMAXUP(1),LPRUP(1) 4373 4374 init_dict = {} 4375 init_dict['idbmup1'] = int(init.split()[0]) 4376 init_dict['idbmup2'] = int(init.split()[1]) 4377 init_dict['ebmup1'] = float(init.split()[2]) 4378 init_dict['ebmup2'] = float(init.split()[3]) 4379 init_dict['pdfgup1'] = int(init.split()[4]) 4380 init_dict['pdfgup2'] = int(init.split()[5]) 4381 init_dict['pdfsup1'] = int(init.split()[6]) 4382 init_dict['pdfsup2'] = int(init.split()[7]) 4383 init_dict['idwtup'] = int(init.split()[8]) 4384 init_dict['nprup'] = int(init.split()[9]) 4385 4386 return init_dict
4387 4388
4389 - def banner_to_mcatnlo(self, evt_file):
4390 """creates the mcatnlo input script using the values set in the header of the event_file. 4391 It also checks if the lhapdf library is used""" 4392 4393 shower = self.banner.get('run_card', 'parton_shower').upper() 4394 pdlabel = self.banner.get('run_card', 'pdlabel') 4395 itry = 0 4396 nevents = self.shower_card['nevents'] 4397 init_dict = self.get_init_dict(evt_file) 4398 4399 if nevents < 0 or \ 4400 nevents > self.banner.get_detail('run_card', 'nevents'): 4401 nevents = self.banner.get_detail('run_card', 'nevents') 4402 4403 nevents = nevents / self.shower_card['nsplit_jobs'] 4404 4405 mcmass_dict = {} 4406 for line in [l for l in self.banner['montecarlomasses'].split('\n') if l]: 4407 pdg = int(line.split()[0]) 4408 mass = float(line.split()[1]) 4409 mcmass_dict[pdg] = mass 4410 4411 content = 'EVPREFIX=%s\n' % pjoin(os.path.split(evt_file)[1]) 4412 content += 'NEVENTS=%d\n' % nevents 4413 content += 'NEVENTS_TOT=%d\n' % (self.banner.get_detail('run_card', 'nevents') /\ 4414 self.shower_card['nsplit_jobs']) 4415 content += 'MCMODE=%s\n' % shower 4416 content += 'PDLABEL=%s\n' % pdlabel 4417 content += 'ALPHAEW=%s\n' % self.banner.get_detail('param_card', 'sminputs', 1).value 4418 #content += 'PDFSET=%s\n' % self.banner.get_detail('run_card', 'lhaid') 4419 #content += 'PDFSET=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']]) 4420 content += 'TMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 6).value 4421 content += 'TWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 6).value 4422 content += 'ZMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 23).value 4423 content += 'ZWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 23).value 4424 content += 'WMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 24).value 4425 content += 'WWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 24).value 4426 try: 4427 content += 'HGGMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 25).value 4428 content += 'HGGWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 25).value 4429 except KeyError: 4430 content += 'HGGMASS=120.\n' 4431 content += 'HGGWIDTH=0.00575308848\n' 4432 content += 'beammom1=%s\n' % self.banner.get_detail('run_card', 'ebeam1') 4433 content += 'beammom2=%s\n' % self.banner.get_detail('run_card', 'ebeam2') 4434 content += 'BEAM1=%s\n' % self.banner.get_detail('run_card', 'lpp1') 4435 content += 'BEAM2=%s\n' % self.banner.get_detail('run_card', 'lpp2') 4436 content += 'DMASS=%s\n' % mcmass_dict[1] 4437 content += 'UMASS=%s\n' % mcmass_dict[2] 4438 content += 'SMASS=%s\n' % mcmass_dict[3] 4439 content += 'CMASS=%s\n' % mcmass_dict[4] 4440 content += 'BMASS=%s\n' % mcmass_dict[5] 4441 try: 4442 content += 'EMASS=%s\n' % mcmass_dict[11] 4443 content += 'MUMASS=%s\n' % mcmass_dict[13] 4444 content += 'TAUMASS=%s\n' % mcmass_dict[15] 4445 except KeyError: 4446 # this is for backward compatibility 4447 mcmass_lines = [l for l in \ 4448 open(pjoin(self.me_dir, 'SubProcesses', 'MCmasses_%s.inc' % shower.upper()) 4449 ).read().split('\n') if l] 4450 new_mcmass_dict = {} 4451 for l in mcmass_lines: 4452 key, val = l.split('=') 4453 new_mcmass_dict[key.strip()] = val.replace('d', 'e').strip() 4454 content += 'EMASS=%s\n' % new_mcmass_dict['mcmass(11)'] 4455 content += 'MUMASS=%s\n' % new_mcmass_dict['mcmass(13)'] 4456 content += 'TAUMASS=%s\n' % new_mcmass_dict['mcmass(15)'] 4457 4458 content += 'GMASS=%s\n' % mcmass_dict[21] 4459 content += 'EVENT_NORM=%s\n' % self.banner.get_detail('run_card', 'event_norm').lower() 4460 
# check if we need to link lhapdf
4461 if int(self.shower_card['pdfcode']) > 1 or \
4462 (pdlabel=='lhapdf' and int(self.shower_card['pdfcode'])==1) or \
4463 shower=='HERWIGPP' :
4464 # Use LHAPDF (should be correctly installed, because
4465 # either events were already generated with them, or the
4466 # user explicitly gives an LHAPDF number in the
4467 # shower_card).
4468 self.link_lhapdf(pjoin(self.me_dir, 'lib'))
4469 lhapdfpath = subprocess.Popen([self.options['lhapdf'], '--prefix'],
4470 stdout = subprocess.PIPE).stdout.read().strip()
4471 content += 'LHAPDFPATH=%s\n' % lhapdfpath
4472 pdfsetsdir = self.get_lhapdf_pdfsetsdir()
4473 if self.shower_card['pdfcode']==0:
4474 lhaid_list = []
4475 pass # no PDFCODE line is written: the shower falls back on its internal default
4476 elif self.shower_card['pdfcode']==1:
4477 lhaid_list = [max([init_dict['pdfsup1'],init_dict['pdfsup2']])]
4478 content += 'PDFCODE=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']])
4479 else:
4480 lhaid_list = [abs(int(self.shower_card['pdfcode']))]
4481 content += 'PDFCODE=%s\n' % self.shower_card['pdfcode']
4482 self.copy_lhapdf_set(lhaid_list, pdfsetsdir)
4483 elif int(self.shower_card['pdfcode'])==1 or \
4484 int(self.shower_card['pdfcode'])==-1:
4485 # Try to use LHAPDF because user wants to use the same PDF
4486 # as was used for the event generation. However, for the
4487 # event generation, LHAPDF was not used, so non-trivial to
4488 # see if LHAPDF is available with the corresponding PDF
4489 # set. If not found, give a warning and use built-in PDF
4490 # set instead.
4491 try:
4492 lhapdfpath = subprocess.Popen([self.options['lhapdf'], '--prefix'],
4493 stdout = subprocess.PIPE).stdout.read().strip()
4494 self.link_lhapdf(pjoin(self.me_dir, 'lib'))
4495 content += 'LHAPDFPATH=%s\n' % lhapdfpath
4496 pdfsetsdir = self.get_lhapdf_pdfsetsdir()
4497 lhaid_list = [max([init_dict['pdfsup1'],init_dict['pdfsup2']])]
4498 content += 'PDFCODE=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']])
4499 self.copy_lhapdf_set(lhaid_list, pdfsetsdir)
4500 except Exception:
4501 logger.warning('Trying to shower events using the same PDF in the shower as used in the generation'+\
4502 ' of the events using LHAPDF. However, no valid LHAPDF installation found with the'+\
4503 ' needed PDF set. Will use default internal PDF for the shower instead. To use the'+\
4504 ' same set as was used in the event generation install LHAPDF and set the path using'+\
4505 ' "set /path_to_lhapdf/bin/lhapdf-config" from the MadGraph5_aMC@NLO python shell')
4506 content += 'LHAPDFPATH=\n'
4507 content += 'PDFCODE=0\n'
4508 else:
4509 content += 'LHAPDFPATH=\n'
4510 content += 'PDFCODE=0\n'
4511
4512 content += 'ICKKW=%s\n' % self.banner.get_detail('run_card', 'ickkw')
4513 content += 'PTJCUT=%s\n' % self.banner.get_detail('run_card', 'ptj')
4514 # add the pythia8/hwpp path(s)
4515 if self.options['pythia8_path']:
4516 content+='PY8PATH=%s\n' % self.options['pythia8_path']
4517 if self.options['hwpp_path']:
4518 content+='HWPPPATH=%s\n' % self.options['hwpp_path']
4519 if self.options['thepeg_path'] and self.options['thepeg_path'] != self.options['hwpp_path']:
4520 content+='THEPEGPATH=%s\n' % self.options['thepeg_path']
4521 if self.options['hepmc_path'] and self.options['hepmc_path'] != self.options['hwpp_path']:
4522 content+='HEPMCPATH=%s\n' % self.options['hepmc_path']
4523
4524 output = open(pjoin(self.me_dir, 'MCatNLO', 'banner.dat'), 'w')
4525 output.write(content)
4526 output.close()
4527 return shower
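The MCatNLO/banner.dat file written above is a flat list of KEY=value assignments read by the MCatNLO shower scripts. A hypothetical excerpt (all values invented):

    EVPREFIX=events.lhe
    NEVENTS=10000
    NEVENTS_TOT=10000
    MCMODE=HERWIG6
    PDLABEL=nn23nlo
    TMASS=173.0
    TWIDTH=1.4915
    BEAM1=1
    BEAM2=1
    LHAPDFPATH=
    PDFCODE=0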
4528 4529
4530 - def run_reweight(self, only):
4531 """runs the reweight_xsec_events executables on each sub-event file generated 4532 to compute on the fly scale and/or PDF uncertainities""" 4533 logger.info(' Doing reweight') 4534 4535 nev_unw = pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted') 4536 # if only doing reweight, copy back the nevents_unweighted file 4537 if only: 4538 if os.path.exists(nev_unw + '.orig'): 4539 files.cp(nev_unw + '.orig', nev_unw) 4540 else: 4541 raise aMCatNLOError('Cannot find event file information') 4542 4543 #read the nevents_unweighted file to get the list of event files 4544 file = open(nev_unw) 4545 lines = file.read().split('\n') 4546 file.close() 4547 # make copy of the original nevent_unweighted file 4548 files.cp(nev_unw, nev_unw + '.orig') 4549 # loop over lines (all but the last one whith is empty) and check that the 4550 # number of events is not 0 4551 evt_files = [line.split()[0] for line in lines[:-1] if line.split()[1] != '0'] 4552 evt_wghts = [float(line.split()[3]) for line in lines[:-1] if line.split()[1] != '0'] 4553 if self.run_card['event_norm'].lower()=='bias' and self.run_card['nevents'] != 0: 4554 evt_wghts[:]=[1./float(self.run_card['nevents']) for wgt in evt_wghts] 4555 #prepare the job_dict 4556 job_dict = {} 4557 exe = 'reweight_xsec_events.local' 4558 for i, evt_file in enumerate(evt_files): 4559 path, evt = os.path.split(evt_file) 4560 files.ln(pjoin(self.me_dir, 'SubProcesses', exe), \ 4561 pjoin(self.me_dir, 'SubProcesses', path)) 4562 job_dict[path] = [exe] 4563 4564 self.run_all(job_dict, [[evt, '1']], 'Running reweight') 4565 4566 #check that the new event files are complete 4567 for evt_file in evt_files: 4568 last_line = subprocess.Popen(['tail', '-n1', '%s.rwgt' % \ 4569 pjoin(self.me_dir, 'SubProcesses', evt_file)], \ 4570 stdout = subprocess.PIPE).stdout.read().strip() 4571 if last_line != "</LesHouchesEvents>": 4572 raise aMCatNLOError('An error occurred during reweighting. Check the' + \ 4573 '\'reweight_xsec_events.output\' files inside the ' + \ 4574 '\'SubProcesses/P*/G*/\' directories for details') 4575 4576 #update file name in nevents_unweighted 4577 newfile = open(nev_unw, 'w') 4578 for line in lines: 4579 if line: 4580 newfile.write(line.replace(line.split()[0], line.split()[0] + '.rwgt') + '\n') 4581 newfile.close() 4582 4583 return self.pdf_scale_from_reweighting(evt_files,evt_wghts)
4584
4585 - def pdf_scale_from_reweighting(self, evt_files,evt_wghts):
4586 """This function takes the files with the scale and pdf values 4587 written by the reweight_xsec_events.f code 4588 (P*/G*/pdf_scale_dependence.dat) and computes the overall 4589 scale and PDF uncertainty (the latter is computed using the 4590 Hessian method (if lhaid<90000) or Gaussian (if lhaid>90000)) 4591 and returns it in percents. The expected format of the file 4592 is: n_scales xsec_scale_central xsec_scale1 ... n_pdf 4593 xsec_pdf0 xsec_pdf1 ....""" 4594 4595 scales=[] 4596 pdfs=[] 4597 for i,evt_file in enumerate(evt_files): 4598 path, evt=os.path.split(evt_file) 4599 with open(pjoin(self.me_dir, 'SubProcesses', path, 'scale_pdf_dependence.dat'),'r') as f: 4600 data_line=f.readline() 4601 if "scale variations:" in data_line: 4602 for j,scale in enumerate(self.run_card['dynamical_scale_choice']): 4603 data_line = f.readline().split() 4604 scales_this = [float(val)*evt_wghts[i] for val in f.readline().replace("D", "E").split()] 4605 try: 4606 scales[j] = [a + b for a, b in zip(scales[j], scales_this)] 4607 except IndexError: 4608 scales+=[scales_this] 4609 data_line=f.readline() 4610 if "pdf variations:" in data_line: 4611 for j,pdf in enumerate(self.run_card['lhaid']): 4612 data_line = f.readline().split() 4613 pdfs_this = [float(val)*evt_wghts[i] for val in f.readline().replace("D", "E").split()] 4614 try: 4615 pdfs[j] = [a + b for a, b in zip(pdfs[j], pdfs_this)] 4616 except IndexError: 4617 pdfs+=[pdfs_this] 4618 4619 # get the scale uncertainty in percent 4620 scale_info=[] 4621 for j,scale in enumerate(scales): 4622 s_cen=scale[0] 4623 if s_cen != 0.0 and self.run_card['reweight_scale'][j]: 4624 # max and min of the full envelope 4625 s_max=(max(scale)/s_cen-1)*100 4626 s_min=(1-min(scale)/s_cen)*100 4627 # ren and fac scale dependence added in quadrature 4628 ren_var=[] 4629 fac_var=[] 4630 for i in range(len(self.run_card['rw_rscale'])): 4631 ren_var.append(scale[i]-s_cen) # central fac scale 4632 for i in range(len(self.run_card['rw_fscale'])): 4633 fac_var.append(scale[i*len(self.run_card['rw_rscale'])]-s_cen) # central ren scale 4634 s_max_q=((s_cen+math.sqrt(math.pow(max(ren_var),2)+math.pow(max(fac_var),2)))/s_cen-1)*100 4635 s_min_q=(1-(s_cen-math.sqrt(math.pow(min(ren_var),2)+math.pow(min(fac_var),2)))/s_cen)*100 4636 s_size=len(scale) 4637 else: 4638 s_max=0.0 4639 s_min=0.0 4640 s_max_q=0.0 4641 s_min_q=0.0 4642 s_size=len(scale) 4643 scale_info.append({'cen':s_cen, 'min':s_min, 'max':s_max, \ 4644 'min_q':s_min_q, 'max_q':s_max_q, 'size':s_size, \ 4645 'label':self.run_card['dynamical_scale_choice'][j], \ 4646 'unc':self.run_card['reweight_scale'][j]}) 4647 4648 # check if we can use LHAPDF to compute the PDF uncertainty 4649 if any(self.run_card['reweight_pdf']): 4650 use_lhapdf=False 4651 lhapdf_libdir=subprocess.Popen([self.options['lhapdf'],'--libdir'],\ 4652 stdout=subprocess.PIPE).stdout.read().strip() 4653 4654 try: 4655 candidates=[dirname for dirname in os.listdir(lhapdf_libdir) \ 4656 if os.path.isdir(pjoin(lhapdf_libdir,dirname))] 4657 except OSError: 4658 candidates=[] 4659 for candidate in candidates: 4660 if os.path.isfile(pjoin(lhapdf_libdir,candidate,'site-packages','lhapdf.so')): 4661 sys.path.insert(0,pjoin(lhapdf_libdir,candidate,'site-packages')) 4662 try: 4663 import lhapdf 4664 use_lhapdf=True 4665 break 4666 except ImportError: 4667 sys.path.pop(0) 4668 continue 4669 4670 if not use_lhapdf: 4671 try: 4672 candidates=[dirname for dirname in os.listdir(lhapdf_libdir+'64') \ 4673 if os.path.isdir(pjoin(lhapdf_libdir+'64',dirname))] 4674 
except OSError:
4675 candidates=[]
4676 for candidate in candidates:
4677 if os.path.isfile(pjoin(lhapdf_libdir+'64',candidate,'site-packages','lhapdf.so')):
4678 sys.path.insert(0,pjoin(lhapdf_libdir+'64',candidate,'site-packages'))
4679 try:
4680 import lhapdf
4681 use_lhapdf=True
4682 break
4683 except ImportError:
4684 sys.path.pop(0)
4685 continue
4686
4687 if not use_lhapdf:
4688 try:
4689 import lhapdf
4690 use_lhapdf=True
4691 except ImportError:
4692 logger.warning("Failed to access python version of LHAPDF: "\
4693 "cannot compute PDF uncertainty from the "\
4694 "weights in the events. The weights in the LHE " \
4695 "event files will still cover all PDF set members, "\
4696 "but there will be no PDF uncertainty printed in the run summary. \n "\
4697 "If the python interface to LHAPDF is available on your system, try "\
4698 "adding its location to the PYTHONPATH environment variable and the "\
4699 "LHAPDF library location to LD_LIBRARY_PATH (linux) or DYLD_LIBRARY_PATH (mac os x).")
4700 use_lhapdf=False
4701
4702 # turn off lhapdf printing any messages
4703 if any(self.run_card['reweight_pdf']) and use_lhapdf: lhapdf.setVerbosity(0)
4704
4705 pdf_info=[]
4706 for j,pdfset in enumerate(pdfs):
4707 p_cen=pdfset[0]
4708 if p_cen != 0.0 and self.run_card['reweight_pdf'][j]:
4709 if use_lhapdf:
4710 pdfsetname=self.run_card['lhapdfsetname'][j]
4711 try:
4712 p=lhapdf.getPDFSet(pdfsetname)
4713 ep=p.uncertainty(pdfset,-1)
4714 p_cen=ep.central
4715 p_min=abs(ep.errminus/p_cen)*100
4716 p_max=abs(ep.errplus/p_cen)*100
4717 p_type=p.errorType
4718 p_size=p.size
4719 p_conf=p.errorConfLevel
4720 except Exception:
4721 logger.warning("Could not access LHAPDF to compute uncertainties for %s" % pdfsetname)
4722 p_min=0.0
4723 p_max=0.0
4724 p_type='unknown'
4725 p_conf='unknown'
4726 p_size=len(pdfset)
4727 else:
4728 p_min=0.0
4729 p_max=0.0
4730 p_type='unknown'
4731 p_conf='unknown'
4732 p_size=len(pdfset)
4733 pdfsetname=self.run_card['lhaid'][j]
4734 else:
4735 p_min=0.0
4736 p_max=0.0
4737 p_type='none'
4738 p_conf='unknown'
4739 p_size=len(pdfset)
4740 pdfsetname=self.run_card['lhaid'][j]
4741 pdf_info.append({'cen':p_cen, 'min':p_min, 'max':p_max, \
4742 'unc':p_type, 'name':pdfsetname, 'size':p_size, \
4743 'label':self.run_card['lhaid'][j], 'conf':p_conf})
4744
4745 scale_pdf_info=[scale_info,pdf_info]
4746 return scale_pdf_info
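A toy numerical illustration (all numbers invented) of the two scale-uncertainty measures computed above, for a 3x3 mu_R x mu_F grid with the central value first and mu_R running fastest:

    import math
    # nine accumulated cross sections, index = i_fac*3 + i_ren
    scale = [10.0, 10.4, 9.7, 10.6, 9.5, 10.2, 9.8, 10.3, 9.6]
    s_cen = scale[0]
    # full envelope over all nine points
    s_max = (max(scale)/s_cen - 1) * 100        # 6.0 (upper)
    s_min = (1 - min(scale)/s_cen) * 100        # 5.0 (lower)
    # mu_R and mu_F variations (other scale at its central value), in quadrature;
    # algebraically equivalent to the s_max_q/s_min_q expressions in the method
    ren_var = [scale[i] - s_cen for i in range(3)]
    fac_var = [scale[i*3] - s_cen for i in range(3)]
    s_max_q = math.sqrt(max(ren_var)**2 + max(fac_var)**2)/s_cen * 100   # ~7.2
    s_min_q = math.sqrt(min(ren_var)**2 + min(fac_var)**2)/s_cen * 100   # ~3.6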
4747 4748
4749 - def wait_for_complete(self, run_type):
4750 """this function waits for jobs on cluster to complete their run.""" 4751 starttime = time.time() 4752 #logger.info(' Waiting for submitted jobs to complete') 4753 update_status = lambda i, r, f: self.update_status((i, r, f, run_type), 4754 starttime=starttime, level='parton', update_results=True) 4755 try: 4756 self.cluster.wait(self.me_dir, update_status) 4757 except: 4758 self.cluster.remove() 4759 raise
4760
4761 - def run_all(self, job_dict, arg_list, run_type='monitor', split_jobs = False):
4762 """runs the jobs in job_dict (organized as folder: [job_list]), with arguments args""" 4763 self.ijob = 0 4764 if run_type != 'shower': 4765 self.njobs = sum(len(jobs) for jobs in job_dict.values()) * len(arg_list) 4766 for args in arg_list: 4767 for Pdir, jobs in job_dict.items(): 4768 for job in jobs: 4769 self.run_exe(job, args, run_type, cwd=pjoin(self.me_dir, 'SubProcesses', Pdir) ) 4770 if self.cluster_mode == 2: 4771 time.sleep(1) # security to allow all jobs to be launched 4772 else: 4773 self.njobs = len(arg_list) 4774 for args in arg_list: 4775 [(cwd, exe)] = job_dict.items() 4776 self.run_exe(exe, args, run_type, cwd) 4777 4778 self.wait_for_complete(run_type)
4779 4780 4781
4782 - def check_event_files(self,jobs):
4783 """check the integrity of the event files after splitting, and resubmit 4784 those which are not nicely terminated""" 4785 jobs_to_resubmit = [] 4786 for job in jobs: 4787 last_line = '' 4788 try: 4789 last_line = subprocess.Popen( 4790 ['tail', '-n1', pjoin(job['dirname'], 'events.lhe')], \ 4791 stdout = subprocess.PIPE).stdout.read().strip() 4792 except IOError: 4793 pass 4794 if last_line != "</LesHouchesEvents>": 4795 jobs_to_resubmit.append(job) 4796 self.njobs = 0 4797 if jobs_to_resubmit: 4798 run_type = 'Resubmitting broken jobs' 4799 logger.info('Some event files are broken, corresponding jobs will be resubmitted.') 4800 for job in jobs_to_resubmit: 4801 logger.debug('Resubmitting ' + job['dirname'] + '\n') 4802 self.run_all_jobs(jobs_to_resubmit,2,fixed_order=False)
4803 4804
4805 - def find_jobs_to_split(self, pdir, job, arg):
4806 """looks into the nevents_unweighed_splitted file to check how many 4807 split jobs are needed for this (pdir, job). arg is F, B or V""" 4808 # find the number of the integration channel 4809 splittings = [] 4810 ajob = open(pjoin(self.me_dir, 'SubProcesses', pdir, job)).read() 4811 pattern = re.compile('for i in (\d+) ; do') 4812 match = re.search(pattern, ajob) 4813 channel = match.groups()[0] 4814 # then open the nevents_unweighted_splitted file and look for the 4815 # number of splittings to be done 4816 nevents_file = open(pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted_splitted')).read() 4817 # This skips the channels with zero events, because they are 4818 # not of the form GFXX_YY, but simply GFXX 4819 pattern = re.compile(r"%s_(\d+)/events.lhe" % \ 4820 pjoin(pdir, 'G%s%s' % (arg,channel))) 4821 matches = re.findall(pattern, nevents_file) 4822 for m in matches: 4823 splittings.append(m) 4824 return splittings
4825 4826
4827 - def run_exe(self, exe, args, run_type, cwd=None):
4828 """this basic function launch locally/on cluster exe with args as argument. 4829 """ 4830 # first test that exe exists: 4831 execpath = None 4832 if cwd and os.path.exists(pjoin(cwd, exe)): 4833 execpath = pjoin(cwd, exe) 4834 elif not cwd and os.path.exists(exe): 4835 execpath = exe 4836 else: 4837 raise aMCatNLOError('Cannot find executable %s in %s' \ 4838 % (exe, os.getcwd())) 4839 # check that the executable has exec permissions 4840 if self.cluster_mode == 1 and not os.access(execpath, os.X_OK): 4841 subprocess.call(['chmod', '+x', exe], cwd=cwd) 4842 # finally run it 4843 if self.cluster_mode == 0: 4844 #this is for the serial run 4845 misc.call(['./'+exe] + args, cwd=cwd) 4846 self.ijob += 1 4847 self.update_status((max([self.njobs - self.ijob - 1, 0]), 4848 min([1, self.njobs - self.ijob]), 4849 self.ijob, run_type), level='parton') 4850 4851 #this is for the cluster/multicore run 4852 elif 'reweight' in exe: 4853 # a reweight run 4854 # Find the correct PDF input file 4855 input_files, output_files = [], [] 4856 pdfinput = self.get_pdf_input_filename() 4857 if os.path.exists(pdfinput): 4858 input_files.append(pdfinput) 4859 input_files.append(pjoin(os.path.dirname(exe), os.path.pardir, 'reweight_xsec_events')) 4860 input_files.append(pjoin(cwd, os.path.pardir, 'leshouche_info.dat')) 4861 input_files.append(args[0]) 4862 output_files.append('%s.rwgt' % os.path.basename(args[0])) 4863 output_files.append('reweight_xsec_events.output') 4864 output_files.append('scale_pdf_dependence.dat') 4865 4866 return self.cluster.submit2(exe, args, cwd=cwd, 4867 input_files=input_files, output_files=output_files, 4868 required_output=output_files) 4869 4870 elif 'ajob' in exe: 4871 # the 'standard' amcatnlo job 4872 # check if args is a list of string 4873 if type(args[0]) == str: 4874 input_files, output_files, required_output, args = self.getIO_ajob(exe,cwd,args) 4875 #submitting 4876 self.cluster.submit2(exe, args, cwd=cwd, 4877 input_files=input_files, output_files=output_files, 4878 required_output=required_output) 4879 4880 # # keep track of folders and arguments for splitted evt gen 4881 # subfolder=output_files[-1].split('/')[0] 4882 # if len(args) == 4 and '_' in subfolder: 4883 # self.split_folders[pjoin(cwd,subfolder)] = [exe] + args 4884 4885 elif 'shower' in exe: 4886 # a shower job 4887 # args are [shower, output(HEP or TOP), run_name] 4888 # cwd is the shower rundir, where the executable are found 4889 input_files, output_files = [], [] 4890 shower = args[0] 4891 # the input files 4892 if shower == 'PYTHIA8': 4893 input_files.append(pjoin(cwd, 'Pythia8.exe')) 4894 input_files.append(pjoin(cwd, 'Pythia8.cmd')) 4895 if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): 4896 input_files.append(pjoin(cwd, 'config.sh')) 4897 input_files.append(pjoin(self.options['pythia8_path'], 'xmldoc')) 4898 else: 4899 input_files.append(pjoin(self.options['pythia8_path'], 'share/Pythia8/xmldoc')) 4900 else: 4901 input_files.append(pjoin(cwd, 'MCATNLO_%s_EXE' % shower)) 4902 input_files.append(pjoin(cwd, 'MCATNLO_%s_input' % shower)) 4903 if shower == 'HERWIGPP': 4904 if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++')): 4905 input_files.append(pjoin(cwd, 'Herwig++')) 4906 if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig')): 4907 input_files.append(pjoin(cwd, 'Herwig')) 4908 input_files.append(pjoin(cwd, 'HepMCFortran.so')) 4909 if len(args) == 3: 4910 if os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')): 4911 
input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz'))
4912 elif os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')):
4913 input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe'))
4914 else:
4915 raise aMCatNLOError, 'Event file not present in %s' % \
4916 pjoin(self.me_dir, 'Events', self.run_name)
4917 else:
4918 input_files.append(pjoin(cwd, 'events_%s.lhe' % args[3]))
4919 # the output files
4920 if len(args) == 3:
4921 output_files.append('mcatnlo_run.log')
4922 else:
4923 output_files.append('mcatnlo_run_%s.log' % args[3])
4924 if args[1] == 'HEP':
4925 if len(args) == 3:
4926 fname = 'events'
4927 else:
4928 fname = 'events_%s' % args[3]
4929 if shower in ['PYTHIA8', 'HERWIGPP']:
4930 output_files.append(fname + '.hepmc.gz')
4931 else:
4932 output_files.append(fname + '.hep.gz')
4933 elif args[1] == 'TOP' or args[1] == 'HWU':
4934 if len(args) == 3:
4935 fname = 'histfile'
4936 else:
4937 fname = 'histfile_%s' % args[3]
4938 output_files.append(fname + '.tar')
4939 else:
4940 raise aMCatNLOError, 'Not a valid output argument for shower job : %s' % args[1]
4941 #submitting
4942 self.cluster.submit2(exe, args, cwd=cwd,
4943 input_files=input_files, output_files=output_files)
4944
4945 else:
4946 return self.cluster.submit(exe, args, cwd=cwd)
4947
4948 - def getIO_ajob(self,exe,cwd, args):
4949 # use local disk if possible => need to know which are the
4950 # input/output files
4951
4952 output_files = []
4953 required_output = []
4954 input_files = [pjoin(self.me_dir, 'SubProcesses', 'randinit'),
4955 pjoin(cwd, 'symfact.dat'),
4956 pjoin(cwd, 'iproc.dat'),
4957 pjoin(cwd, 'initial_states_map.dat'),
4958 pjoin(cwd, 'configs_and_props_info.dat'),
4959 pjoin(cwd, 'leshouche_info.dat'),
4960 pjoin(cwd, 'FKS_params.dat')]
4961
4962 # For GoSam interface, we must copy the SLHA card as well
4963 if os.path.exists(pjoin(self.me_dir,'OLP_virtuals','gosam.rc')):
4964 input_files.append(pjoin(self.me_dir, 'Cards', 'param_card.dat'))
4965
4966 if os.path.exists(pjoin(cwd,'nevents.tar')):
4967 input_files.append(pjoin(cwd,'nevents.tar'))
4968
4969 if os.path.exists(pjoin(self.me_dir,'SubProcesses','OLE_order.olc')):
4970 input_files.append(pjoin(cwd, 'OLE_order.olc'))
4971
4972 # File for the loop (might not be present if MadLoop is not used)
4973 if os.path.exists(pjoin(cwd,'MadLoop5_resources.tar.gz')) and \
4974 cluster.need_transfer(self.options):
4975 input_files.append(pjoin(cwd, 'MadLoop5_resources.tar.gz'))
4976 elif os.path.exists(pjoin(cwd,'MadLoop5_resources')) and \
4977 cluster.need_transfer(self.options):
4978 tf=tarfile.open(pjoin(cwd,'MadLoop5_resources.tar.gz'),'w:gz',
4979 dereference=True)
4980 tf.add(pjoin(cwd,'MadLoop5_resources'),arcname='MadLoop5_resources')
4981 tf.close()
4982 input_files.append(pjoin(cwd, 'MadLoop5_resources.tar.gz'))
4983
4984 if args[1] == 'born' or args[1] == 'all':
4985 # MADEVENT MINT FO MODE
4986 input_files.append(pjoin(cwd, 'madevent_mintFO'))
4987 if args[2] == '0':
4988 current = '%s_G%s' % (args[1],args[0])
4989 else:
4990 current = '%s_G%s_%s' % (args[1],args[0],args[2])
4991 if os.path.exists(pjoin(cwd,current)):
4992 input_files.append(pjoin(cwd, current))
4993 output_files.append(current)
4994
4995 required_output.append('%s/results.dat' % current)
4996 required_output.append('%s/res_%s.dat' % (current,args[3]))
4997 required_output.append('%s/log_MINT%s.txt' % (current,args[3]))
4998 required_output.append('%s/mint_grids' % current)
4999 required_output.append('%s/grid.MC_integer' % current)
5000 if args[3] != '0':
5001 required_output.append('%s/scale_pdf_dependence.dat' % current)
5002
5003 elif args[1] == 'F' or args[1] == 'B':
5004 # MINTMC MODE
5005 input_files.append(pjoin(cwd, 'madevent_mintMC'))
5006
5007 if args[2] == '0':
5008 current = 'G%s%s' % (args[1],args[0])
5009 else:
5010 current = 'G%s%s_%s' % (args[1],args[0],args[2])
5011 if os.path.exists(pjoin(cwd,current)):
5012 input_files.append(pjoin(cwd, current))
5013 output_files.append(current)
5014 if args[2] > '0':
5015 # this is for the split event generation
5016 output_files.append('G%s%s_%s' % (args[1], args[0], args[2]))
5017 required_output.append('G%s%s_%s/log_MINT%s.txt' % (args[1],args[0],args[2],args[3]))
5018
5019 else:
5020 required_output.append('%s/log_MINT%s.txt' % (current,args[3]))
5021 if args[3] in ['0','1']:
5022 required_output.append('%s/results.dat' % current)
5023 if args[3] == '1':
5024 output_files.append('%s/results.dat' % current)
5025
5026 else:
5027 raise aMCatNLOError, 'invalid arguments: %s' % (', '.join(args))
5028
5029 #Find the correct PDF input file
5030 pdfinput = self.get_pdf_input_filename()
5031 if os.path.exists(pdfinput):
5032 input_files.append(pdfinput)
5033 return input_files, output_files, required_output, args
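For reference, a few hypothetical examples (channel numbers invented) of the args -> working-directory naming convention handled above:

    args = ['3', 'born', '0', '0']  ->  current = 'born_G3'   (fixed-order MINT)
    args = ['3', 'all',  '2', '1']  ->  current = 'all_G3_2'
    args = ['3', 'F',    '0', '1']  ->  current = 'GF3'       (MINTMC)
    args = ['3', 'B',    '4', '2']  ->  current = 'GB3_4'     (split event generation)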
5034 5035
5036 - def compile(self, mode, options):
5037 """compiles aMC@NLO to compute either NLO or NLO matched to shower, as 5038 specified in mode""" 5039 5040 os.mkdir(pjoin(self.me_dir, 'Events', self.run_name)) 5041 5042 self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, 5043 '%s_%s_banner.txt' % (self.run_name, self.run_tag))) 5044 5045 self.get_characteristics(pjoin(self.me_dir, 5046 'SubProcesses', 'proc_characteristics')) 5047 5048 #define a bunch of log files 5049 amcatnlo_log = pjoin(self.me_dir, 'compile_amcatnlo.log') 5050 madloop_log = pjoin(self.me_dir, 'compile_madloop.log') 5051 reweight_log = pjoin(self.me_dir, 'compile_reweight.log') 5052 test_log = pjoin(self.me_dir, 'test.log') 5053 5054 # environmental variables to be included in make_opts 5055 self.make_opts_var = {} 5056 if self.proc_characteristics['has_loops'] and \ 5057 not os.path.exists(pjoin(self.me_dir,'OLP_virtuals')): 5058 self.make_opts_var['madloop'] = 'true' 5059 5060 self.update_status('Compiling the code', level=None, update_results=True) 5061 5062 libdir = pjoin(self.me_dir, 'lib') 5063 sourcedir = pjoin(self.me_dir, 'Source') 5064 5065 #clean files 5066 files.rm([amcatnlo_log, madloop_log, reweight_log, test_log]) 5067 #define which executable/tests to compile 5068 if '+' in mode: 5069 mode = mode.split('+')[0] 5070 if mode in ['NLO', 'LO']: 5071 exe = 'madevent_mintFO' 5072 tests = ['test_ME'] 5073 self.analyse_card.write_card(pjoin(self.me_dir, 'SubProcesses', 'analyse_opts')) 5074 elif mode in ['aMC@NLO', 'aMC@LO','noshower','noshowerLO']: 5075 exe = 'madevent_mintMC' 5076 tests = ['test_ME', 'test_MC'] 5077 # write an analyse_opts with a dummy analysis so that compilation goes through 5078 with open(pjoin(self.me_dir, 'SubProcesses', 'analyse_opts'),'w') as fsock: 5079 fsock.write('FO_ANALYSE=analysis_dummy.o dbook.o open_output_files_dummy.o HwU_dummy.o\n') 5080 5081 #directory where to compile exe 5082 p_dirs = [d for d in \ 5083 open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')).read().split('\n') if d] 5084 # create param_card.inc and run_card.inc 5085 self.do_treatcards('', amcatnlo=True, mode=mode) 5086 # if --nocompile option is specified, check here that all exes exists. 5087 # If they exists, return 5088 if all([os.path.exists(pjoin(self.me_dir, 'SubProcesses', p_dir, exe)) \ 5089 for p_dir in p_dirs]) and options['nocompile']: 5090 return 5091 5092 # rm links to lhapdflib/ PDFsets if exist 5093 if os.path.exists(pjoin(libdir, 'PDFsets')): 5094 files.rm(pjoin(libdir, 'PDFsets')) 5095 5096 # read the run_card to find if lhapdf is used or not 5097 if self.run_card['pdlabel'] == 'lhapdf' and \ 5098 (self.banner.get_detail('run_card', 'lpp1') != 0 or \ 5099 self.banner.get_detail('run_card', 'lpp2') != 0): 5100 5101 self.link_lhapdf(libdir, [pjoin('SubProcesses', p) for p in p_dirs]) 5102 pdfsetsdir = self.get_lhapdf_pdfsetsdir() 5103 lhaid_list = self.run_card['lhaid'] 5104 self.copy_lhapdf_set(lhaid_list, pdfsetsdir) 5105 5106 else: 5107 if self.run_card['lpp1'] == 1 == self.run_card['lpp2']: 5108 logger.info('Using built-in libraries for PDFs') 5109 5110 self.make_opts_var['lhapdf'] = "" 5111 5112 # read the run_card to find if applgrid is used or not 5113 if self.run_card['iappl'] != 0: 5114 self.make_opts_var['applgrid'] = 'True' 5115 # check versions of applgrid and amcfast 5116 for code in ['applgrid','amcfast']: 5117 try: 5118 p = subprocess.Popen([self.options[code], '--version'], \ 5119 stdout=subprocess.PIPE, stderr=subprocess.PIPE) 5120 except OSError: 5121 raise aMCatNLOError(('No valid %s installation found. 
5122 'Please set the path to %s-config by using \n' + \
5123 'MG5_aMC> set <absolute-path-to-%s>/bin/%s-config \n') % (code,code,code,code))
5124 else:
5125 output, _ = p.communicate()
5126 if code == 'applgrid' and output < '1.4.63':
5127 raise aMCatNLOError('Version of APPLgrid is too old. Use 1.4.63 or later.'\
5128 +' You are using %s' % output)
5129 if code == 'amcfast' and output < '1.1.1':
5130 raise aMCatNLOError('Version of aMCfast is too old. Use 1.1.1 or later.'\
5131 +' You are using %s' % output)
5132
5133 # set-up the Source/make_opts with the correct applgrid-config file
5134 appllibs=" APPLLIBS=$(shell %s --ldflags) $(shell %s --ldcflags) \n" \
5135 % (self.options['amcfast'],self.options['applgrid'])
5136 text=open(pjoin(self.me_dir,'Source','make_opts'),'r').readlines()
5137 text_out=[]
5138 for line in text:
5139 if line.strip().startswith('APPLLIBS=$'):
5140 line=appllibs
5141 text_out.append(line)
5142 with open(pjoin(self.me_dir,'Source','make_opts'),'w') as fsock:
5143 fsock.writelines(text_out)
5144 else:
5145 self.make_opts_var['applgrid'] = ""
5146
5147 if 'fastjet' in self.options.keys() and self.options['fastjet']:
5148 self.make_opts_var['fastjet_config'] = self.options['fastjet']
5149
5150 # add the make_opts_var to make_opts
5151 self.update_make_opts()
5152
5153 # make Source
5154 self.update_status('Compiling source...', level=None)
5155 misc.compile(['clean4pdf'], cwd = sourcedir)
5156 misc.compile(cwd = sourcedir)
5157 if os.path.exists(pjoin(libdir, 'libdhelas.a')) \
5158 and os.path.exists(pjoin(libdir, 'libgeneric.a')) \
5159 and os.path.exists(pjoin(libdir, 'libmodel.a')) \
5160 and os.path.exists(pjoin(libdir, 'libpdf.a')):
5161 logger.info(' ...done, continuing with P* directories')
5162 else:
5163 raise aMCatNLOError('Compilation failed')
5164
5165 # make StdHep (only necessary with MG option output_dependencies='internal')
5166 MCatNLO_libdir = pjoin(self.me_dir, 'MCatNLO', 'lib')
5167 if not os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libstdhep.a'))) or \
5168 not os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libFmcfio.a'))):
5169 if os.path.exists(pjoin(sourcedir,'StdHEP')):
5170 logger.info('Compiling StdHEP ...')
5171 misc.compile(['StdHEP'], cwd = sourcedir)
5172 logger.info(' ...done.')
5173 else:
5174 raise aMCatNLOError('Could not compile StdHEP because its'+\
5175 ' source directory could not be found in the SOURCE folder.\n'+\
5176 " Check the MG5_aMC option 'output_dependencies'.")
5177
5178 # make CutTools (only necessary with MG option output_dependencies='internal')
5179 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \
5180 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))):
5181 if os.path.exists(pjoin(sourcedir,'CutTools')):
5182 logger.info('Compiling CutTools (can take a couple of minutes) ...')
5183 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1)
5184 logger.info(' ...done.')
5185 else:
5186 raise aMCatNLOError('Could not compile CutTools because its'+\
5187 ' source directory could not be found in the SOURCE folder.\n'+\
5188 " Check the MG5_aMC option 'output_dependencies.'")
5189 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \
5190 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))):
5191 raise aMCatNLOError('CutTools compilation failed.')
5192
5193 # Verify compatibility between current compiler and the one which was
5194 # used when last compiling CutTools (if specified).
5195 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 5196 libdir, 'libcts.a')))),'compiler_version.log') 5197 if os.path.exists(compiler_log_path): 5198 compiler_version_used = open(compiler_log_path,'r').read() 5199 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 5200 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 5201 if os.path.exists(pjoin(sourcedir,'CutTools')): 5202 logger.info('CutTools was compiled with a different fortran'+\ 5203 ' compiler. Re-compiling it now...') 5204 misc.compile(['cleanCT'], cwd = sourcedir) 5205 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 5206 logger.info(' ...done.') 5207 else: 5208 raise aMCatNLOError("CutTools installation in %s"\ 5209 %os.path.realpath(pjoin(libdir, 'libcts.a'))+\ 5210 " seems to have been compiled with a different compiler than"+\ 5211 " the one specified in MG5_aMC. Please recompile CutTools.") 5212 5213 # make IREGI (only necessary with MG option output_dependencies='internal') 5214 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libiregi.a'))) \ 5215 and os.path.exists(pjoin(sourcedir,'IREGI')): 5216 logger.info('Compiling IREGI (can take a couple of minutes) ...') 5217 misc.compile(['IREGI'], cwd = sourcedir) 5218 logger.info(' ...done.') 5219 5220 if os.path.exists(pjoin(libdir, 'libiregi.a')): 5221 # Verify compatibility between current compiler and the one which was 5222 # used when last compiling IREGI (if specified). 5223 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 5224 libdir, 'libiregi.a')))),'compiler_version.log') 5225 if os.path.exists(compiler_log_path): 5226 compiler_version_used = open(compiler_log_path,'r').read() 5227 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 5228 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 5229 if os.path.exists(pjoin(sourcedir,'IREGI')): 5230 logger.info('IREGI was compiled with a different fortran'+\ 5231 ' compiler. Re-compiling it now...') 5232 misc.compile(['cleanIR'], cwd = sourcedir) 5233 misc.compile(['IREGI'], cwd = sourcedir) 5234 logger.info(' ...done.') 5235 else: 5236 raise aMCatNLOError("IREGI installation in %s"\ 5237 %os.path.realpath(pjoin(libdir, 'libiregi.a'))+\ 5238 " seems to have been compiled with a different compiler than"+\ 5239 " the one specified in MG5_aMC. 
Please recompile IREGI.") 5240 5241 # check if MadLoop virtuals have been generated 5242 if self.proc_characteristics['has_loops'] and \ 5243 not os.path.exists(pjoin(self.me_dir,'OLP_virtuals')): 5244 if mode in ['NLO', 'aMC@NLO', 'noshower']: 5245 tests.append('check_poles') 5246 5247 # make and run tests (if asked for), gensym and make madevent in each dir 5248 self.update_status('Compiling directories...', level=None) 5249 5250 for test in tests: 5251 self.write_test_input(test) 5252 5253 try: 5254 import multiprocessing 5255 if not self.nb_core: 5256 try: 5257 self.nb_core = int(self.options['nb_core']) 5258 except TypeError: 5259 self.nb_core = multiprocessing.cpu_count() 5260 except ImportError: 5261 self.nb_core = 1 5262 5263 compile_options = copy.copy(self.options) 5264 compile_options['nb_core'] = self.nb_core 5265 compile_cluster = cluster.MultiCore(**compile_options) 5266 logger.info('Compiling on %d cores' % self.nb_core) 5267 5268 update_status = lambda i, r, f: self.donothing(i,r,f) 5269 for p_dir in p_dirs: 5270 compile_cluster.submit(prog = compile_dir, 5271 argument = [self.me_dir, p_dir, mode, options, 5272 tests, exe, self.options['run_mode']]) 5273 try: 5274 compile_cluster.wait(self.me_dir, update_status) 5275 except Exception, error: 5276 logger.warning("Compilation of the Subprocesses failed") 5277 if __debug__: 5278 raise 5279 compile_cluster.remove() 5280 self.do_quit('') 5281 5282 logger.info('Checking test output:') 5283 for p_dir in p_dirs: 5284 logger.info(p_dir) 5285 for test in tests: 5286 logger.info(' Result for %s:' % test) 5287 5288 this_dir = pjoin(self.me_dir, 'SubProcesses', p_dir) 5289 #check that none of the tests failed 5290 self.check_tests(test, this_dir)
5291 5292
5293 - def donothing(*args):
5294 pass
5295 5296
5297 - def check_tests(self, test, dir):
5298 """just call the correct parser for the test log. 5299 Skip check_poles for LOonly folders""" 5300 if test in ['test_ME', 'test_MC']: 5301 return self.parse_test_mx_log(pjoin(dir, '%s.log' % test)) 5302 elif test == 'check_poles' and not os.path.exists(pjoin(dir,'parton_lum_0.f')): 5303 return self.parse_check_poles_log(pjoin(dir, '%s.log' % test))
5304 5305
5306 - def parse_test_mx_log(self, log):
5307 """read and parse the test_ME/MC.log file""" 5308 content = open(log).read() 5309 if 'FAILED' in content: 5310 logger.info('Output of the failing test:\n'+content[:-1],'$MG:BOLD') 5311 raise aMCatNLOError('Some tests failed, run cannot continue. Please search on https://answers.launchpad.net/mg5amcnlo for more information, and in case there is none, report the problem there.') 5312 else: 5313 lines = [l for l in content.split('\n') if 'PASSED' in l] 5314 logger.info(' Passed.') 5315 logger.debug('\n'+'\n'.join(lines))
5316 5317
5318 - def parse_check_poles_log(self, log):
5319 """reads and parse the check_poles.log file""" 5320 content = open(log).read() 5321 npass = 0 5322 nfail = 0 5323 for line in content.split('\n'): 5324 if 'PASSED' in line: 5325 npass +=1 5326 tolerance = float(line.split()[1]) 5327 if 'FAILED' in line: 5328 nfail +=1 5329 tolerance = float(line.split()[1]) 5330 5331 if nfail + npass == 0: 5332 logger.warning('0 points have been tried') 5333 return 5334 5335 if float(nfail)/float(nfail+npass) > 0.1: 5336 raise aMCatNLOError('Poles do not cancel, run cannot continue') 5337 else: 5338 logger.info(' Poles successfully cancel for %d points over %d (tolerance=%2.1e)' \ 5339 %(npass, nfail+npass, tolerance))
5340 5341
5342 - def write_test_input(self, test):
5343 """write the input files to run test_ME/MC or check_poles""" 5344 if test in ['test_ME', 'test_MC']: 5345 content = "-2 -2\n" #generate randomly energy/angle 5346 content+= "100 100\n" #run 100 points for soft and collinear tests 5347 content+= "0\n" #all FKS configs 5348 content+= '\n'.join(["-1"] * 50) #random diagram (=first diagram) 5349 elif test == 'check_poles': 5350 content = '20 \n -1\n' 5351 5352 file = open(pjoin(self.me_dir, '%s_input.txt' % test), 'w') 5353 if test == 'test_MC': 5354 shower = self.run_card['parton_shower'] 5355 header = "1 \n %s\n 1 -0.1\n-1 -0.1\n" % shower 5356 file.write(header + content) 5357 elif test == 'test_ME': 5358 header = "2 \n" 5359 file.write(header + content) 5360 else: 5361 file.write(content) 5362 file.close()
5363 5364 5365 action_switcher = AskRunNLO 5366 ############################################################################
5367 - def ask_run_configuration(self, mode, options, switch={}):
5368 """Ask the question when launching generate_events/multi_run""" 5369 5370 if 'parton' not in options: 5371 options['parton'] = False 5372 if 'reweightonly' not in options: 5373 options['reweightonly'] = False 5374 5375 if mode == 'auto': 5376 mode = None 5377 if not mode and (options['parton'] or options['reweightonly']): 5378 mode = 'noshower' 5379 5380 passing_cmd = [] 5381 for key,value in switch.keys(): 5382 passing_cmd.append('%s=%s' % (key,value)) 5383 5384 if 'do_reweight' in options and options['do_reweight']: 5385 passing_cmd.append('reweight=ON') 5386 if 'do_madspin' in options and options['do_madspin']: 5387 passing_cmd.append('madspin=ON') 5388 5389 force = self.force 5390 if mode == 'onlyshower': 5391 passing_cmd.append('onlyshower') 5392 force = True 5393 elif mode: 5394 passing_cmd.append(mode) 5395 ####mode = None # allow to overwrite it due to EW 5396 5397 switch, cmd_switch = self.ask('', '0', [], ask_class = self.action_switcher, 5398 mode=mode, force=force, 5399 first_cmd=passing_cmd, 5400 return_instance=True) 5401 5402 if 'mode' in switch: 5403 mode = switch['mode'] 5404 #assign the mode depending of the switch 5405 if not mode or mode == 'auto': 5406 if switch['order'] == 'LO': 5407 if switch['runshower']: 5408 mode = 'aMC@LO' 5409 elif switch['fixed_order'] == 'ON': 5410 mode = 'LO' 5411 else: 5412 mode = 'noshowerLO' 5413 elif switch['order'] == 'NLO': 5414 if switch['runshower']: 5415 mode = 'aMC@NLO' 5416 elif switch['fixed_order'] == 'ON': 5417 mode = 'NLO' 5418 else: 5419 mode = 'noshower' 5420 logger.info('will run in mode: %s' % mode) 5421 5422 if mode == 'noshower': 5423 if switch['shower'] == 'OFF': 5424 logger.warning("""You have chosen not to run a parton shower. NLO events without showering are NOT physical. Please, shower the LesHouches events before using them for physics analyses. You have to choose NOW which parton-shower you WILL use and specify it in the run_card.""") 5425 else: 5426 logger.info("""Your parton-shower choice is not available for running. Events will be generated for the associated parton shower. Remember that NLO events without showering are NOT physical.""", '$MG:BOLD') 5427 5428 5429 # specify the cards which are needed for this run. 
5430 cards = ['param_card.dat', 'run_card.dat']
5431 ignore = []
5432 if mode in ['LO', 'NLO']:
5433 options['parton'] = True
5434 ignore = ['shower_card.dat', 'madspin_card.dat']
5435 cards.append('FO_analyse_card.dat')
5436 else:
5437 if switch['madspin'] != 'OFF':
5438 cards.append('madspin_card.dat')
5439 if switch['reweight'] != 'OFF':
5440 cards.append('reweight_card.dat')
5441 if switch['madanalysis'] in ['HADRON', 'ON']:
5442 cards.append('madanalysis5_hadron_card.dat')
5443 if 'aMC@' in mode:
5444 cards.append('shower_card.dat')
5445 if mode == 'onlyshower':
5446 cards = ['shower_card.dat']
5447 if options['reweightonly']:
5448 cards = ['run_card.dat']
5449
5450 self.keep_cards(cards, ignore)
5451
5452 if mode =='onlyshower':
5453 cards = ['shower_card.dat']
5454
5455
5456 # automatically switch to keep_wgt option
5457 first_cmd = cmd_switch.get_cardcmd()
5458
5459 if not options['force'] and not self.force:
5460 self.ask_edit_cards(cards, plot=False, first_cmd=first_cmd)
5461
5462 self.banner = banner_mod.Banner()
5463
5464 # store the cards in the banner
5465 for card in cards:
5466 self.banner.add(pjoin(self.me_dir, 'Cards', card))
5467 # and the run settings
5468 run_settings = '\n'.join(['%s = %s' % (k, v) for (k, v) in switch.items()])
5469 self.banner.add_text('run_settings', run_settings)
5470
5471 if not mode =='onlyshower':
5472 self.run_card = self.banner.charge_card('run_card')
5473 self.run_tag = self.run_card['run_tag']
5474 #this is if the user did not provide a name for the current run
5475 if not hasattr(self, 'run_name') or not self.run_name:
5476 self.run_name = self.find_available_run_name(self.me_dir)
5477 #add a tag in the run_name to distinguish the run type
5478 if self.run_name.startswith('run_'):
5479 if mode in ['LO','aMC@LO','noshowerLO']:
5480 self.run_name += '_LO'
5481 self.set_run_name(self.run_name, self.run_tag, 'parton')
5482 if self.run_card['ickkw'] == 3 and mode in ['LO', 'aMC@LO', 'noshowerLO']:
5483 raise self.InvalidCmd("""FxFx merging (ickkw=3) not allowed at LO""")
5484 elif self.run_card['ickkw'] == 3 and mode in ['aMC@NLO', 'noshower']:
5485 logger.warning("""You are running with FxFx merging enabled. To be able to merge samples of various multiplicities without double counting, you have to remove some events after showering 'by hand'. Please read http://amcatnlo.cern.ch/FxFx_merging.htm for more details.""")
5486 if self.run_card['parton_shower'].upper() == 'PYTHIA6Q':
5487 raise self.InvalidCmd("""FxFx merging does not work with Pythia6's Q-squared ordered showers.""")
5488 elif self.run_card['parton_shower'].upper() not in ('HERWIG6', 'PYTHIA8', 'HERWIGPP'):
5489 question="FxFx merging not tested for %s shower. Do you want to continue?\n" % self.run_card['parton_shower'] + \
5490 "Type \'n\' to stop or \'y\' to continue"
5491 answers = ['n','y']
5492 answer = self.ask(question, 'n', answers)
5493 if answer == 'n':
5494 error = '''Stop operation'''
5495 self.ask_run_configuration(mode, options)
5496 # raise aMCatNLOError(error)
5497 elif self.run_card['ickkw'] == -1 and mode in ['aMC@NLO', 'noshower']:
5498 # NNLL+NLO jet-veto only possible for LO event generation or fNLO runs.
5499 raise self.InvalidCmd("""NNLL+NLO jet veto runs (ickkw=-1) only possible for fNLO or LO.""") 5500 if 'aMC@' in mode or mode == 'onlyshower': 5501 self.shower_card = self.banner.charge_card('shower_card') 5502 5503 elif mode in ['LO', 'NLO']: 5504 analyse_card_path = pjoin(self.me_dir, 'Cards','FO_analyse_card.dat') 5505 self.analyse_card = self.banner.charge_card('FO_analyse_card') 5506 5507 return mode
5508
5509 5510 #=============================================================================== 5511 # aMCatNLOCmd 5512 #=============================================================================== 5513 -class aMCatNLOCmdShell(aMCatNLOCmd, cmd.CmdShell):
5514 """The command line processor of MadGraph"""
5515 5516 _compile_usage = "compile [MODE] [options]\n" + \ 5517 "-- compiles aMC@NLO \n" + \ 5518 " MODE can be either FO, for fixed-order computations, \n" + \ 5519 " or MC for matching with parton-shower monte-carlos. \n" + \ 5520 " (if omitted, it is set to MC)\n" 5521 _compile_parser = misc.OptionParser(usage=_compile_usage) 5522 _compile_parser.add_option("-f", "--force", default=False, action='store_true', 5523 help="Use the card present in the directory for the launch, without editing them") 5524 5525 _launch_usage = "launch [MODE] [options]\n" + \ 5526 "-- execute aMC@NLO \n" + \ 5527 " MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \ 5528 " If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \ 5529 " computation of the total cross section and the filling of parton-level histograms.\n" + \ 5530 " If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a .lhe \n" + \ 5531 " event file is generated which will be showered with the MonteCarlo specified \n" + \ 5532 " in the run_card.dat\n" 5533 5534 _launch_parser = misc.OptionParser(usage=_launch_usage) 5535 _launch_parser.add_option("-f", "--force", default=False, action='store_true', 5536 help="Use the card present in the directory for the launch, without editing them") 5537 _launch_parser.add_option("-c", "--cluster", default=False, action='store_true', 5538 help="Submit the jobs on the cluster") 5539 _launch_parser.add_option("-m", "--multicore", default=False, action='store_true', 5540 help="Submit the jobs on multicore mode") 5541 _launch_parser.add_option("-x", "--nocompile", default=False, action='store_true', 5542 help="Skip compilation. Ignored if no executable is found") 5543 _launch_parser.add_option("-r", "--reweightonly", default=False, action='store_true', 5544 help="Skip integration and event generation, just run reweight on the" + \ 5545 " latest generated event files (see list in SubProcesses/nevents_unweighted)") 5546 _launch_parser.add_option("-p", "--parton", default=False, action='store_true', 5547 help="Stop the run after the parton level file generation (you need " + \ 5548 "to shower the file in order to get physical results)") 5549 _launch_parser.add_option("-o", "--only_generation", default=False, action='store_true', 5550 help="Skip grid set up, just generate events starting from " + \ 5551 "the last available results") 5552 _launch_parser.add_option("-n", "--name", default=False, dest='run_name', 5553 help="Provide a name to the run") 5554 _launch_parser.add_option("-a", "--appl_start_grid", default=False, dest='appl_start_grid', 5555 help="For use with APPLgrid only: start from existing grids") 5556 _launch_parser.add_option("-R", "--reweight", default=False, dest='do_reweight', action='store_true', 5557 help="Run the reweight module (reweighting by different model parameters)") 5558 _launch_parser.add_option("-M", "--madspin", default=False, dest='do_madspin', action='store_true', 5559 help="Run the madspin package") 5560 5561 5562 5563 _generate_events_usage = "generate_events [MODE] [options]\n" + \ 5564 "-- execute aMC@NLO \n" + \ 5565 " MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \ 5566 " If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \ 5567 " computation of the total cross section and the filling of parton-level histograms.\n" + \ 5568 " If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a 
.lhe \n" + \ 5569 " event file is generated which will be showered with the MonteCarlo specified \n" + \ 5570 " in the run_card.dat\n" 5571 5572 _generate_events_parser = misc.OptionParser(usage=_generate_events_usage) 5573 _generate_events_parser.add_option("-f", "--force", default=False, action='store_true', 5574 help="Use the card present in the directory for the generate_events, without editing them") 5575 _generate_events_parser.add_option("-c", "--cluster", default=False, action='store_true', 5576 help="Submit the jobs on the cluster") 5577 _generate_events_parser.add_option("-m", "--multicore", default=False, action='store_true', 5578 help="Submit the jobs on multicore mode") 5579 _generate_events_parser.add_option("-x", "--nocompile", default=False, action='store_true', 5580 help="Skip compilation. Ignored if no executable is found") 5581 _generate_events_parser.add_option("-r", "--reweightonly", default=False, action='store_true', 5582 help="Skip integration and event generation, just run reweight on the" + \ 5583 " latest generated event files (see list in SubProcesses/nevents_unweighted)") 5584 _generate_events_parser.add_option("-p", "--parton", default=False, action='store_true', 5585 help="Stop the run after the parton level file generation (you need " + \ 5586 "to shower the file in order to get physical results)") 5587 _generate_events_parser.add_option("-o", "--only_generation", default=False, action='store_true', 5588 help="Skip grid set up, just generate events starting from " + \ 5589 "the last available results") 5590 _generate_events_parser.add_option("-n", "--name", default=False, dest='run_name', 5591 help="Provide a name to the run") 5592 5593 5594 5595 _calculate_xsect_usage = "calculate_xsect [ORDER] [options]\n" + \ 5596 "-- calculate cross section up to ORDER.\n" + \ 5597 " ORDER can be either LO or NLO (if omitted, it is set to NLO). \n" 5598 5599 _calculate_xsect_parser = misc.OptionParser(usage=_calculate_xsect_usage) 5600 _calculate_xsect_parser.add_option("-f", "--force", default=False, action='store_true', 5601 help="Use the card present in the directory for the launch, without editing them") 5602 _calculate_xsect_parser.add_option("-c", "--cluster", default=False, action='store_true', 5603 help="Submit the jobs on the cluster") 5604 _calculate_xsect_parser.add_option("-m", "--multicore", default=False, action='store_true', 5605 help="Submit the jobs on multicore mode") 5606 _calculate_xsect_parser.add_option("-x", "--nocompile", default=False, action='store_true', 5607 help="Skip compilation. Ignored if no executable is found") 5608 _calculate_xsect_parser.add_option("-n", "--name", default=False, dest='run_name', 5609 help="Provide a name to the run") 5610 _calculate_xsect_parser.add_option("-a", "--appl_start_grid", default=False, dest='appl_start_grid', 5611 help="For use with APPLgrid only: start from existing grids") 5612 _calculate_xsect_parser.add_option("-o", "--only_generation", default=False, action='store_true', 5613 help="Skip grid set up, just generate events starting from " + \ 5614 "the last available results") 5615 5616 _shower_usage = 'shower run_name [options]\n' + \ 5617 '-- do shower/hadronization on parton-level file generated for run run_name\n' + \ 5618 ' all the information (e.g. 
number of events, MonteCarlo, ...)\n' + \
5619 ' is directly read from the header of the event file\n'
5620 _shower_parser = misc.OptionParser(usage=_shower_usage)
5621 _shower_parser.add_option("-f", "--force", default=False, action='store_true',
5622 help="Use the shower_card present in the directory for the launch, without editing")
5623
5624 if '__main__' == __name__:
5625 # Launch the interface without checking whether another instance is already running.
5626 # This can ONLY run a single command !!
5627 import sys
5628 if not sys.version_info[0] == 2 or sys.version_info[1] < 6:
5629 sys.exit('MadGraph/MadEvent 5 works only with python 2.6 or later (but not python 3.X).\n'+\
5630 'Please update your version of python.')
5631
5632 import os
5633 import optparse
5634 # Get the directory of the script real path (bin)
5635 # and add it to the current PYTHONPATH
5636 root_path = os.path.dirname(os.path.dirname(os.path.realpath( __file__ )))
5637 sys.path.insert(0, root_path)
5638 5639 - class MyOptParser(optparse.OptionParser):
5640 - class InvalidOption(Exception): pass
5641 - def error(self, msg=''):
5642 raise MyOptParser.InvalidOption(msg)
5643 # Write out nice usage message if called with -h or --help
5644 usage = "usage: %prog [options] [FILE] "
5645 parser = MyOptParser(usage=usage)
5646 parser.add_option("-l", "--logging", default='INFO',
5647 help="logging level (DEBUG|INFO|WARNING|ERROR|CRITICAL) [%default]")
5648 parser.add_option("","--web", action="store_true", default=False, dest='web', \
5649 help='force the code to be in secure mode')
5650 parser.add_option("","--debug", action="store_true", default=False, dest='debug', \
5651 help='force to launch debug mode')
5652 parser_error = ''
5653 done = False
5654
5655 for i in range(len(sys.argv)-1):
5656 try:
5657 (options, args) = parser.parse_args(sys.argv[1:len(sys.argv)-i])
5658 done = True
5659 except MyOptParser.InvalidOption, error:
5660 pass
5661 else:
5662 args += sys.argv[len(sys.argv)-i:]
5663 if not done:
5664 # raise correct error:
5665 try:
5666 (options, args) = parser.parse_args()
5667 except MyOptParser.InvalidOption, error:
5668 print error
5669 sys.exit(2)
5670
5671 if len(args) == 0:
5672 args = ''
5673
5674 import subprocess
5675 import logging
5676 import logging.config
5677 # Set logging level according to the logging level given by options
5678 #logging.basicConfig(level=vars(logging)[options.logging])
5679 import internal.coloring_logging
5680 try:
5681 if __debug__ and options.logging == 'INFO':
5682 options.logging = 'DEBUG'
5683 if options.logging.isdigit():
5684 level = int(options.logging)
5685 else:
5686 level = eval('logging.' + options.logging)
5687 print os.path.join(root_path, 'internal', 'me5_logging.conf')
5688 logging.config.fileConfig(os.path.join(root_path, 'internal', 'me5_logging.conf'))
5689 logging.root.setLevel(level)
5690 logging.getLogger('madgraph').setLevel(level)
5691 except:
5692 raise
5693
5694
5695 # Call the cmd interface main loop
5696 try:
5697 if args:
5698 # a single command is provided
5699 if '--web' in args:
5700 i = args.index('--web')
5701 args.pop(i)
5702 cmd_line = aMCatNLOCmd(me_dir=os.path.dirname(root_path),force_run=True)
5703 else:
5704 cmd_line = aMCatNLOCmdShell(me_dir=os.path.dirname(root_path),force_run=True)
5705
5706 if not hasattr(cmd_line, 'do_%s' % args[0]):
5707 if parser_error:
5708 print parser_error
5709 print 'and %s can not be interpreted as a valid command.' % args[0]
5710 else:
5711 print 'ERROR: %s not a valid command. Please retry' % args[0]
5712 else:
5713 cmd_line.use_rawinput = False
5714 cmd_line.run_cmd(' '.join(args))
5715 cmd_line.run_cmd('quit')
5716
5717 except KeyboardInterrupt:
5718 print 'quit on KeyboardInterrupt'
5719 pass
5720