Package madgraph :: Package iolibs :: Module export_fks
[hide private]
[frames] | no frames

Source Code for Module madgraph.iolibs.export_fks

   1  ################################################################################ 
   2  # 
   3  # Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors 
   4  # 
   5  # This file is a part of the MadGraph5_aMC@NLO project, an application which  
   6  # automatically generates Feynman diagrams and matrix elements for arbitrary 
   7  # high-energy processes in the Standard Model and beyond. 
   8  # 
   9  # It is subject to the MadGraph5_aMC@NLO license which should accompany this  
  10  # distribution. 
  11  # 
  12  # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch 
  13  # 
  14  ################################################################################ 
  15  """Methods and classes to export matrix elements to fks format.""" 
  16   
  17  from distutils import dir_util 
  18  import glob 
  19  import logging 
  20  import os 
  21  import re 
  22  import shutil 
  23  import subprocess 
  24  import string 
  25  import copy 
  26  import platform 
  27   
  28  import madgraph.core.color_algebra as color 
  29  import madgraph.core.helas_objects as helas_objects 
  30  import madgraph.core.base_objects as base_objects 
  31  import madgraph.fks.fks_helas_objects as fks_helas_objects 
  32  import madgraph.fks.fks_base as fks 
  33  import madgraph.fks.fks_common as fks_common 
  34  import madgraph.iolibs.drawing_eps as draw 
  35  import madgraph.iolibs.gen_infohtml as gen_infohtml 
  36  import madgraph.iolibs.files as files 
  37  import madgraph.various.misc as misc 
  38  import madgraph.iolibs.file_writers as writers 
  39  import madgraph.iolibs.template_files as template_files 
  40  import madgraph.iolibs.ufo_expression_parsers as parsers 
  41  import madgraph.iolibs.export_v4 as export_v4 
  42  import madgraph.loop.loop_exporters as loop_exporters 
  43  import madgraph.various.q_polynomial as q_polynomial 
  44  import madgraph.various.banner as banner_mod 
  45   
  46  import aloha.create_aloha as create_aloha 
  47   
  48  import models.write_param_card as write_param_card 
  49  import models.check_param_card as check_param_card 
  50  from madgraph import MadGraph5Error, MG5DIR, InvalidCmd 
  51  from madgraph.iolibs.files import cp, ln, mv 
  52   
# Shorthand for os.path.join, used pervasively throughout this module.
pjoin = os.path.join

# Root directory of the madgraph package (one level above iolibs/), with a
# trailing slash; used below to locate python files that are copied into the
# exported process directory.
_file_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] + '/'
logger = logging.getLogger('madgraph.export_fks')
  57   
  58   
def make_jpeg_async(args):
    """Worker converting the diagram postscript of one P* directory to jpeg.

    Designed to be mapped over by an asynchronous/multiprocessing pool,
    hence the single packed argument:
      args[0] -- Pdir: the P* subprocess directory to run in
      args[1] -- old_pos: the directory the export was launched from
      args[2] -- dir_path: the process output directory, relative to old_pos

    Runs bin/internal/gen_jpeg-pl inside Pdir with its output discarded.
    """
    Pdir = args[0]
    old_pos = args[1]
    dir_path = args[2]

    devnull = os.open(os.devnull, os.O_RDWR)
    cwd = os.getcwd()
    try:
        os.chdir(Pdir)
        subprocess.call([os.path.join(old_pos, dir_path, 'bin', 'internal', 'gen_jpeg-pl')],
                        stdout=devnull)
    finally:
        # Fix: restore the caller's working directory even if the call
        # raises (the original 'chdir(os.path.pardir)' also broke for
        # multi-level Pdir paths), and close the devnull fd which was
        # previously leaked on every invocation.
        os.chdir(cwd)
        os.close(devnull)
70 71 72 #================================================================================= 73 # Class for used of the (non-optimized) Loop process 74 #=================================================================================
75 -class ProcessExporterFortranFKS(loop_exporters.LoopProcessExporterFortranSA):
76 """Class to take care of exporting a set of matrix elements to 77 Fortran (v4) format.""" 78 79 #=============================================================================== 80 # copy the Template in a new directory. 81 #===============================================================================
    def copy_fkstemplate(self):
        """create the directory run_name as a copy of the MadEvent
        Template, and clean the directory
        For now it is just the same as copy_v4template, but it will be modified

        Steps performed, in order: copy the NLO Template tree (or only
        version info if it exists), optionally clean it, link CutTools,
        regenerate makefile_loop/make_opts from their .inc templates,
        duplicate the default cards, install the MadLoop parameter files,
        write MadLoopCommons.f and the CutTools mp headers, force MC over
        helicities off in FKS_params.dat, and finally copy the python
        helper files.  NOTE(review): this method chdirs into SubProcesses
        and back; the order of the file operations matters.
        """
        mgme_dir = self.mgme_dir
        dir_path = self.dir_path
        clean = self.opt['clean']

        # First copy the full template tree if dir_path doesn't exist
        if not os.path.isdir(dir_path):
            if not mgme_dir:
                raise MadGraph5Error, \
                      "No valid MG_ME path given for MG4 run directory creation."
            logger.info('initialize a new directory: %s' % \
                        os.path.basename(dir_path))
            shutil.copytree(os.path.join(mgme_dir, 'Template', 'NLO'), dir_path, True)
            # distutils.dir_util.copy_tree since dir_path already exists
            dir_util.copy_tree(pjoin(self.mgme_dir, 'Template', 'Common'), dir_path)
            # Copy plot_card
            for card in ['plot_card']:
                if os.path.isfile(pjoin(self.dir_path, 'Cards', card + '.dat')):
                    try:
                        shutil.copy(pjoin(self.dir_path, 'Cards', card + '.dat'),
                                    pjoin(self.dir_path, 'Cards', card + '_default.dat'))
                    except IOError:
                        logger.warning("Failed to move " + card + ".dat to default")

        elif not os.path.isfile(os.path.join(dir_path, 'TemplateVersion.txt')):
            # Directory exists but carries no version stamp: only install it.
            if not mgme_dir:
                raise MadGraph5Error, \
                      "No valid MG_ME path given for MG4 run directory creation."
            try:
                shutil.copy(os.path.join(mgme_dir, 'MGMEVersion.txt'), dir_path)
            except IOError:
                MG5_version = misc.get_pkg_info()
                open(os.path.join(dir_path, 'MGMEVersion.txt'), 'w').write( \
                    "5." + MG5_version['version'])

        # Ensure that the Template is clean
        if clean:
            logger.info('remove old information in %s' % os.path.basename(dir_path))
            if os.environ.has_key('MADGRAPH_BASE'):
                # Web-server installation: clean_template knows the layout.
                subprocess.call([os.path.join('bin', 'internal', 'clean_template'),
                                 '--web'], cwd=dir_path)
            else:
                try:
                    subprocess.call([os.path.join('bin', 'internal', 'clean_template')], \
                                    cwd=dir_path)
                except Exception, why:
                    raise MadGraph5Error('Failed to clean correctly %s: \n %s' \
                                         % (os.path.basename(dir_path), why))
            # Write version info
            MG_version = misc.get_pkg_info()
            open(os.path.join(dir_path, 'SubProcesses', 'MGVersion.txt'), 'w').write(
                MG_version['version'])

        # We must link the CutTools to the Library folder of the active Template
        self.link_CutTools(dir_path)

        # Replace the .inc makefile templates by their filled versions
        # (write_makefile_TIR / write_make_opts read the Template copies).
        link_tir_libs = []
        tir_libs = []
        os.remove(os.path.join(self.dir_path, 'SubProcesses', 'makefile_loop.inc'))
        dirpath = os.path.join(self.dir_path, 'SubProcesses')
        filename = pjoin(self.dir_path, 'SubProcesses', 'makefile_loop')
        calls = self.write_makefile_TIR(writers.MakefileWriter(filename),
                                        link_tir_libs, tir_libs)
        os.remove(os.path.join(self.dir_path, 'Source', 'make_opts.inc'))
        filename = pjoin(self.dir_path, 'Source', 'make_opts')
        calls = self.write_make_opts(writers.MakefileWriter(filename),
                                     link_tir_libs, tir_libs)

        # Duplicate run_card and FO_analyse_card
        for card in ['FO_analyse_card', 'shower_card']:
            try:
                shutil.copy(pjoin(self.dir_path, 'Cards',
                                  card + '.dat'),
                            pjoin(self.dir_path, 'Cards',
                                  card + '_default.dat'))
            except IOError:
                logger.warning("Failed to copy " + card + ".dat to default")

        cwd = os.getcwd()
        dirpath = os.path.join(self.dir_path, 'SubProcesses')
        try:
            os.chdir(dirpath)
        except os.error:
            logger.error('Could not cd to directory %s' % dirpath)
            return 0

        # We add here the user-friendly MadLoop option setter.
        cpfiles = ["SubProcesses/MadLoopParamReader.f",
                   "Cards/MadLoopParams.dat",
                   "SubProcesses/MadLoopParams.inc"]

        for file in cpfiles:
            shutil.copy(os.path.join(self.loop_dir, 'StandAlone/', file),
                        os.path.join(self.dir_path, file))

        shutil.copy(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat'),
                    pjoin(self.dir_path, 'Cards', 'MadLoopParams_default.dat'))

        if os.path.exists(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat')):
            self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.dir_path,
                                                'Cards', 'MadLoopParams.dat'))
            # write the output file
            self.MadLoopparam.write(pjoin(self.dir_path, "SubProcesses",
                                          "MadLoopParams.dat"))

        # We need minimal editing of MadLoopCommons.f
        MadLoopCommon = open(os.path.join(self.loop_dir, 'StandAlone',
                             "SubProcesses", "MadLoopCommons.inc")).read()
        writer = writers.FortranWriter(os.path.join(self.dir_path,
                                       "SubProcesses", "MadLoopCommons.f"))
        writer.writelines(MadLoopCommon % {
            'print_banner_commands': self.MadLoop_banner},
            context={'collier_available': False})
        writer.close()

        # Write the cts_mpc.h and cts_mprec.h files imported from CutTools
        # (written relative to the current dir, i.e. SubProcesses).
        self.write_mp_files(writers.FortranWriter('cts_mprec.h'),\
                            writers.FortranWriter('cts_mpc.h'))

        # Finally make sure to turn off MC over Hel for the default mode.
        FKS_card_path = pjoin(self.dir_path, 'Cards', 'FKS_params.dat')
        FKS_card_file = open(FKS_card_path, 'r')
        FKS_card = FKS_card_file.read()
        FKS_card_file.close()
        FKS_card = re.sub(r"#NHelForMCoverHels\n-?\d+",
                          "#NHelForMCoverHels\n-1", FKS_card)
        FKS_card_file = open(FKS_card_path, 'w')
        FKS_card_file.write(FKS_card)
        FKS_card_file.close()

        # Return to original PWD
        os.chdir(cwd)
        # Copy the different python files in the Template
        self.copy_python_files()

        # We need to create the correct open_data for the pdf
        self.write_pdf_opendata()
225 226 # I put it here not in optimized one, because I want to use the same makefile_loop.inc 227 # Also, we overload this function (i.e. it is already defined in 228 # LoopProcessExporterFortranSA) because the path of the template makefile 229 # is different.
230 - def write_makefile_TIR(self, writer, link_tir_libs,tir_libs,tir_include=[]):
231 """ Create the file makefile_loop which links to the TIR libraries.""" 232 233 file = open(os.path.join(self.mgme_dir,'Template','NLO', 234 'SubProcesses','makefile_loop.inc')).read() 235 replace_dict={} 236 replace_dict['link_tir_libs']=' '.join(link_tir_libs) 237 replace_dict['tir_libs']=' '.join(tir_libs) 238 replace_dict['dotf']='%.f' 239 replace_dict['doto']='%.o' 240 replace_dict['tir_include']=' '.join(tir_include) 241 file=file%replace_dict 242 if writer: 243 writer.writelines(file) 244 else: 245 return file
246 247 # I put it here not in optimized one, because I want to use the same make_opts.inc
248 - def write_make_opts(self, writer, link_tir_libs,tir_libs):
249 """ Create the file make_opts which links to the TIR libraries.""" 250 file = open(os.path.join(self.mgme_dir,'Template','NLO', 251 'Source','make_opts.inc')).read() 252 replace_dict={} 253 replace_dict['link_tir_libs']=' '.join(link_tir_libs) 254 replace_dict['tir_libs']=' '.join(tir_libs) 255 replace_dict['dotf']='%.f' 256 replace_dict['doto']='%.o' 257 file=file%replace_dict 258 if writer: 259 writer.writelines(file) 260 else: 261 return file
262 263 #=========================================================================== 264 # copy_python_files 265 #===========================================================================
266 - def copy_python_files(self):
267 """copy python files required for the Template""" 268 269 files_to_copy = [ \ 270 pjoin('interface','amcatnlo_run_interface.py'), 271 pjoin('interface','extended_cmd.py'), 272 pjoin('interface','common_run_interface.py'), 273 pjoin('interface','coloring_logging.py'), 274 pjoin('various','misc.py'), 275 pjoin('various','shower_card.py'), 276 pjoin('various','FO_analyse_card.py'), 277 pjoin('various','histograms.py'), 278 pjoin('various','banner.py'), 279 pjoin('various','cluster.py'), 280 pjoin('various','systematics.py'), 281 pjoin('various','lhe_parser.py'), 282 pjoin('madevent','sum_html.py'), 283 pjoin('madevent','gen_crossxhtml.py'), 284 pjoin('iolibs','files.py'), 285 pjoin('iolibs','save_load_object.py'), 286 pjoin('iolibs','file_writers.py'), 287 pjoin('..','models','check_param_card.py'), 288 pjoin('__init__.py') 289 ] 290 cp(_file_path+'/interface/.mg5_logging.conf', 291 self.dir_path+'/bin/internal/me5_logging.conf') 292 293 for cp_file in files_to_copy: 294 cp(pjoin(_file_path,cp_file), 295 pjoin(self.dir_path,'bin','internal',os.path.basename(cp_file)))
296
297 - def convert_model(self, model, wanted_lorentz = [], 298 wanted_couplings = []):
299 300 super(ProcessExporterFortranFKS,self).convert_model(model, 301 wanted_lorentz, wanted_couplings) 302 303 IGNORE_PATTERNS = ('*.pyc','*.dat','*.py~') 304 try: 305 shutil.rmtree(pjoin(self.dir_path,'bin','internal','ufomodel')) 306 except OSError as error: 307 pass 308 model_path = model.get('modelpath') 309 shutil.copytree(model_path, 310 pjoin(self.dir_path,'bin','internal','ufomodel'), 311 ignore=shutil.ignore_patterns(*IGNORE_PATTERNS)) 312 if hasattr(model, 'restrict_card'): 313 out_path = pjoin(self.dir_path, 'bin', 'internal','ufomodel', 314 'restrict_default.dat') 315 if isinstance(model.restrict_card, check_param_card.ParamCard): 316 model.restrict_card.write(out_path) 317 else: 318 files.cp(model.restrict_card, out_path)
319 320 321 322 #=========================================================================== 323 # write_maxparticles_file 324 #===========================================================================
325 - def write_maxparticles_file(self, writer, maxparticles):
326 """Write the maxparticles.inc file for MadEvent""" 327 328 lines = "integer max_particles, max_branch\n" 329 lines += "parameter (max_particles=%d) \n" % maxparticles 330 lines += "parameter (max_branch=max_particles-1)" 331 332 # Write the file 333 writer.writelines(lines) 334 335 return True
336 337 338 #=========================================================================== 339 # write_maxconfigs_file 340 #===========================================================================
341 - def write_maxconfigs_file(self, writer, maxconfigs):
342 """Write the maxconfigs.inc file for MadEvent""" 343 344 lines = "integer lmaxconfigs\n" 345 lines += "parameter (lmaxconfigs=%d)" % maxconfigs 346 347 # Write the file 348 writer.writelines(lines) 349 350 return True
351 352 353 #=============================================================================== 354 # write a procdef_mg5 (an equivalent of the MG4 proc_card.dat) 355 #===============================================================================
356 - def write_procdef_mg5(self, file_pos, modelname, process_str):
357 """ write an equivalent of the MG4 proc_card in order that all the Madevent 358 Perl script of MadEvent4 are still working properly for pure MG5 run.""" 359 360 proc_card_template = template_files.mg4_proc_card.mg4_template 361 process_template = template_files.mg4_proc_card.process_template 362 process_text = '' 363 coupling = '' 364 new_process_content = [] 365 366 # First find the coupling and suppress the coupling from process_str 367 #But first ensure that coupling are define whithout spaces: 368 process_str = process_str.replace(' =', '=') 369 process_str = process_str.replace('= ', '=') 370 process_str = process_str.replace(',',' , ') 371 #now loop on the element and treat all the coupling 372 for info in process_str.split(): 373 if '=' in info: 374 coupling += info + '\n' 375 else: 376 new_process_content.append(info) 377 # Recombine the process_str (which is the input process_str without coupling 378 #info) 379 process_str = ' '.join(new_process_content) 380 381 #format the SubProcess 382 process_text += process_template.substitute({'process': process_str, \ 383 'coupling': coupling}) 384 385 text = proc_card_template.substitute({'process': process_text, 386 'model': modelname, 387 'multiparticle':''}) 388 ff = open(file_pos, 'w') 389 ff.write(text) 390 ff.close()
391 392 393 #=============================================================================== 394 # write a initial states map, useful for the fast PDF NLO interface 395 #===============================================================================
396 - def write_init_map(self, file_pos, initial_states):
397 """ Write an initial state process map. Each possible PDF 398 combination gets an unique identifier.""" 399 400 text='' 401 for i,e in enumerate(initial_states): 402 text=text+str(i+1)+' '+str(len(e)) 403 for t in e: 404 if len(t) ==1: 405 t.append(0) 406 text=text+' ' 407 try: 408 for p in t: 409 if p == None : p = 0 410 text=text+' '+str(p) 411 except TypeError: 412 text=text+' '+str(t) 413 text=text+'\n' 414 415 ff = open(file_pos, 'w') 416 ff.write(text) 417 ff.close()
418
419 - def get_ME_identifier(self, matrix_element, *args, **opts):
420 """ A function returning a string uniquely identifying the matrix 421 element given in argument so that it can be used as a prefix to all 422 MadLoop5 subroutines and common blocks related to it. This allows 423 to compile several processes into one library as requested by the 424 BLHA (Binoth LesHouches Accord) guidelines. The MadFKS design 425 necessitates that there is no process prefix.""" 426 427 return ''
428 429 #=============================================================================== 430 # write_coef_specs 431 #===============================================================================
432 - def write_coef_specs_file(self, virt_me_list):
433 """writes the coef_specs.inc in the DHELAS folder. Should not be called in the 434 non-optimized mode""" 435 raise fks_common.FKSProcessError(), \ 436 "write_coef_specs should be called only in the loop-optimized mode"
437 438 439 #=============================================================================== 440 # generate_directories_fks 441 #===============================================================================
    def generate_directories_fks(self, matrix_element, fortran_model, me_number,
                                 me_ntot, path=os.getcwd(), OLP='MadLoop'):
        """Generate the Pxxxxx_i directories for a subprocess in MadFKS,
        including the necessary matrix.f and various helper files

        NOTE(review): the 'path=os.getcwd()' default is evaluated once at
        import time, not per call -- presumably callers always pass 'path'
        explicitly; confirm before relying on the default.
        The method chdirs into path, then into the P* directory it
        creates, and restores the original cwd at the end; all the
        'filename = ...' writes below are therefore relative to the P*
        directory.  Returns the number of helas calls of the virtual ME
        (0 if there is none).
        """
        proc = matrix_element.born_matrix_element['processes'][0]

        if not self.model:
            self.model = matrix_element.get('processes')[0].get('model')

        cwd = os.getcwd()
        try:
            os.chdir(path)
        except OSError, error:
            error_msg = "The directory %s should exist in order to be able " % path + \
                        "to \"export\" in it. If you see this error message by " + \
                        "typing the command \"export\" please consider to use " + \
                        "instead the command \"output\". "
            raise MadGraph5Error, error_msg

        calls = 0

        self.fksdirs = []
        #first make and cd the direcrory corresponding to the born process:
        borndir = "P%s" % \
                  (matrix_element.get('processes')[0].shell_string())
        os.mkdir(borndir)
        os.chdir(borndir)
        logger.info('Writing files in %s (%d / %d)' % (borndir, me_number + 1, me_ntot))

        ## write the files corresponding to the born process in the P* directory
        self.generate_born_fks_files(matrix_element,
                                     fortran_model, me_number, path)

        # With NJET you want to generate the order file per subprocess and most
        # likely also generate it for each subproc.
        if OLP == 'NJET':
            filename = 'OLE_order.lh'
            self.write_lh_order(filename, [matrix_element.born_matrix_element.get('processes')[0]], OLP)

        if matrix_element.virt_matrix_element:
            calls += self.generate_virt_directory( \
                matrix_element.virt_matrix_element, \
                fortran_model, \
                os.path.join(path, borndir))

        #write the infortions for the different real emission processes
        self.write_real_matrix_elements(matrix_element, fortran_model)

        self.write_pdf_calls(matrix_element, fortran_model)

        filename = 'nFKSconfigs.inc'
        self.write_nfksconfigs_file(writers.FortranWriter(filename),
                                    matrix_element,
                                    fortran_model)

        filename = 'iproc.dat'
        self.write_iproc_file(writers.FortranWriter(filename),
                              me_number)

        filename = 'fks_info.inc'
        self.write_fks_info_file(writers.FortranWriter(filename),
                                 matrix_element,
                                 fortran_model)

        filename = 'leshouche_info.dat'
        nfksconfs, maxproc, maxflow, nexternal = \
            self.write_leshouche_info_file(filename, matrix_element)

        # if no corrections are generated ([LOonly] mode), get
        # these variables from the born
        if nfksconfs == maxproc == maxflow == 0:
            nfksconfs = 1
            (dummylines, maxproc, maxflow) = self.get_leshouche_lines(
                matrix_element.born_matrix_element, 1)

        filename = 'leshouche_decl.inc'
        self.write_leshouche_info_declarations(
            writers.FortranWriter(filename),
            nfksconfs, maxproc, maxflow, nexternal,
            fortran_model)
        filename = 'genps.inc'
        ngraphs = matrix_element.born_matrix_element.get_number_of_amplitudes()
        ncolor = max(1, len(matrix_element.born_matrix_element.get('color_basis')))
        self.write_genps(writers.FortranWriter(filename), maxproc, ngraphs,\
                         ncolor, maxflow, fortran_model)

        filename = 'configs_and_props_info.dat'
        nconfigs, max_leg_number = self.write_configs_and_props_info_file(
            filename,
            matrix_element)

        filename = 'configs_and_props_decl.inc'
        self.write_configs_and_props_info_declarations(
            writers.FortranWriter(filename),
            nconfigs, max_leg_number, nfksconfs,
            fortran_model)

        filename = 'real_from_born_configs.inc'
        self.write_real_from_born_configs(
            writers.FortranWriter(filename),
            matrix_element,
            fortran_model)

        filename = 'ngraphs.inc'
        self.write_ngraphs_file(writers.FortranWriter(filename),
                                nconfigs)

        #write the wrappers
        filename = 'real_me_chooser.f'
        self.write_real_me_wrapper(writers.FortranWriter(filename),
                                   matrix_element,
                                   fortran_model)

        filename = 'parton_lum_chooser.f'
        self.write_pdf_wrapper(writers.FortranWriter(filename),
                               matrix_element,
                               fortran_model)

        filename = 'get_color.f'
        self.write_colors_file(writers.FortranWriter(filename),
                               matrix_element)

        filename = 'nexternal.inc'
        (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
        self.write_nexternal_file(writers.FortranWriter(filename),
                                  nexternal, ninitial)
        self.proc_characteristic['ninitial'] = ninitial
        self.proc_characteristic['nexternal'] = max(self.proc_characteristic['nexternal'], nexternal)

        filename = 'pmass.inc'
        # Use the first real-emission process if any; [LOonly] processes
        # have no real processes, in which case fall back on the born.
        try:
            self.write_pmass_file(writers.FortranWriter(filename),
                                  matrix_element.real_processes[0].matrix_element)
        except IndexError:
            self.write_pmass_file(writers.FortranWriter(filename),
                                  matrix_element.born_matrix_element)

        #draw the diagrams
        self.draw_feynman_diagrams(matrix_element)

        # Files shared by all P* directories: link them from SubProcesses.
        linkfiles = ['BinothLHADummy.f',
                     'check_poles.f',
                     'MCmasses_HERWIG6.inc',
                     'MCmasses_HERWIGPP.inc',
                     'MCmasses_PYTHIA6Q.inc',
                     'MCmasses_PYTHIA6PT.inc',
                     'MCmasses_PYTHIA8.inc',
                     'add_write_info.f',
                     'coupl.inc',
                     'cuts.f',
                     'FKS_params.dat',
                     'initial_states_map.dat',
                     'OLE_order.olc',
                     'FKSParams.inc',
                     'FKSParamReader.f',
                     'cuts.inc',
                     'unlops.inc',
                     'pythia_unlops.f',
                     'driver_mintMC.f',
                     'driver_mintFO.f',
                     'appl_interface.cc',
                     'appl_interface_dummy.f',
                     'appl_common.inc',
                     'reweight_appl.inc',
                     'fastjetfortran_madfks_core.cc',
                     'fastjetfortran_madfks_full.cc',
                     'fjcore.cc',
                     'fastjet_wrapper.f',
                     'fjcore.hh',
                     'fks_Sij.f',
                     'fks_powers.inc',
                     'fks_singular.f',
                     'veto_xsec.f',
                     'veto_xsec.inc',
                     'weight_lines.f',
                     'fks_inc_chooser.f',
                     'leshouche_inc_chooser.f',
                     'configs_and_props_inc_chooser.f',
                     'genps_fks.f',
                     'boostwdir2.f',
                     'madfks_mcatnlo.inc',
                     'open_output_files.f',
                     'open_output_files_dummy.f',
                     'HwU_dummy.f',
                     'madfks_plot.f',
                     'analysis_dummy.f',
                     'analysis_lhe.f',
                     'mint-integrator2.f',
                     'MC_integer.f',
                     'mint.inc',
                     'montecarlocounter.f',
                     'q_es.inc',
                     'recluster.cc',
                     'Boosts.h',
                     'reweight_xsec.f',
                     'reweight_xsec_events.f',
                     'reweight_xsec_events_pdf_dummy.f',
                     'iproc_map.f',
                     'run.inc',
                     'run_card.inc',
                     'setcuts.f',
                     'setscales.f',
                     'test_soft_col_limits.f',
                     'symmetry_fks_v3.f',
                     'vegas2.for',
                     'write_ajob.f',
                     'handling_lhe_events.f',
                     'write_event.f',
                     'fill_MC_mshell.f',
                     'maxparticles.inc',
                     'message.inc',
                     'initcluster.f',
                     'cluster.inc',
                     'cluster.f',
                     'reweight.f',
                     'randinit',
                     'sudakov.inc',
                     'maxconfigs.inc',
                     'timing_variables.inc']

        for file in linkfiles:
            ln('../' + file, '.')
        os.system("ln -s ../../Cards/param_card.dat .")

        #copy the makefile
        os.system("ln -s ../makefile_fks_dir ./makefile")
        # Pick the BinothLHA flavour matching how the virtuals are provided.
        if matrix_element.virt_matrix_element:
            os.system("ln -s ../BinothLHA.f ./BinothLHA.f")
        elif OLP != 'MadLoop':
            os.system("ln -s ../BinothLHA_OLP.f ./BinothLHA.f")
        else:
            os.system("ln -s ../BinothLHA_user.f ./BinothLHA.f")

        # Return to SubProcesses dir
        os.chdir(os.path.pardir)
        # Add subprocess to subproc.mg
        filename = 'subproc.mg'
        files.append_to_file(filename,
                             self.write_subproc,
                             borndir)

        os.chdir(cwd)
        # Generate info page
        gen_infohtml.make_info_html_nlo(self.dir_path)

        return calls
690 691 #=========================================================================== 692 # create the run_card 693 #===========================================================================
694 - def create_run_card(self, processes, history):
695 """ """ 696 697 run_card = banner_mod.RunCardNLO() 698 699 run_card.create_default_for_process(self.proc_characteristic, 700 history, 701 processes) 702 703 run_card.write(pjoin(self.dir_path, 'Cards', 'run_card_default.dat')) 704 run_card.write(pjoin(self.dir_path, 'Cards', 'run_card.dat'))
705 706
707 - def pass_information_from_cmd(self, cmd):
708 """pass information from the command interface to the exporter. 709 Please do not modify any object of the interface from the exporter. 710 """ 711 self.proc_defs = cmd._curr_proc_defs 712 if hasattr(cmd,'born_processes'): 713 self.born_processes = cmd.born_processes 714 else: 715 self.born_processes = [] 716 return
717
    def finalize(self, matrix_elements, history, mg5options, flaglist):
        """Finalize FKS directory by creating jpeg diagrams, html
        pages,proc_card_mg5.dat and madevent.tar.gz and create the MA5 card if
        necessary.

        Also checks the lhapdf configuration, writes the model helper
        fortran files and maxconfigs/maxparticles includes, sets the
        compilers, duplicates the default cards, and installs StdHEP
        according to the 'output_dependencies' option.
        """
        devnull = os.open(os.devnull, os.O_RDWR)
        # Sanity-check the configured lhapdf executable; a failure only
        # produces a warning since the built-in PDFs still work.
        try:
            res = misc.call([mg5options['lhapdf'], '--version'], \
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except Exception:
            res = 1
        if res != 0:
            logger.info('The value for lhapdf in the current configuration does not ' + \
                        'correspond to a valid executable.\nPlease set it correctly either in ' + \
                        'input/mg5_configuration or with "set lhapdf /path/to/lhapdf-config" ' + \
                        'and regenrate the process. \nTo avoid regeneration, edit the ' + \
                        ('%s/Cards/amcatnlo_configuration.txt file.\n' % self.dir_path ) + \
                        'Note that you can still compile and run aMC@NLO with the built-in PDFs\n')

        compiler_dict = {'fortran': mg5options['fortran_compiler'],
                         'cpp': mg5options['cpp_compiler'],
                         'f2py': mg5options['f2py_compiler']}

        if 'nojpeg' in flaglist:
            makejpg = False
        else:
            makejpg = True
        output_dependencies = mg5options['output_dependencies']

        self.proc_characteristic['grouped_matrix'] = False
        self.proc_characteristic['complex_mass_scheme'] = mg5options['complex_mass_scheme']

        self.create_proc_charac()

        self.create_run_card(matrix_elements.get_processes(), history)
        # modelname = self.model.get('name')
        # if modelname == 'mssm' or modelname.startswith('mssm-'):
        #     param_card = os.path.join(self.dir_path, 'Cards','param_card.dat')
        #     mg5_param = os.path.join(self.dir_path, 'Source', 'MODEL', 'MG5_param.dat')
        #     check_param_card.convert_to_mg5card(param_card, mg5_param)
        #     check_param_card.check_valid_param_card(mg5_param)

        # # write the model functions get_mass/width_from_id
        filename = os.path.join(self.dir_path, 'Source', 'MODEL', 'get_mass_width_fcts.f')
        makeinc = os.path.join(self.dir_path, 'Source', 'MODEL', 'makeinc.inc')
        self.write_get_mass_width_file(writers.FortranWriter(filename), makeinc, self.model)

        # # Write maxconfigs.inc based on max of ME's/subprocess groups
        filename = os.path.join(self.dir_path, 'Source', 'maxconfigs.inc')
        self.write_maxconfigs_file(writers.FortranWriter(filename),
                                   matrix_elements.get_max_configs())

        # # Write maxparticles.inc based on max of ME's/subprocess groups
        filename = os.path.join(self.dir_path, 'Source', 'maxparticles.inc')
        self.write_maxparticles_file(writers.FortranWriter(filename),
                                     matrix_elements.get_max_particles())

        # Touch "done" file
        os.system('touch %s/done' % os.path.join(self.dir_path, 'SubProcesses'))

        # Check for compiler
        fcompiler_chosen = self.set_fortran_compiler(compiler_dict)
        ccompiler_chosen = self.set_cpp_compiler(compiler_dict['cpp'])

        old_pos = os.getcwd()
        os.chdir(os.path.join(self.dir_path, 'SubProcesses'))
        P_dir_list = [proc for proc in os.listdir('.') if os.path.isdir(proc) and \
                      proc[0] == 'P']

        devnull = os.open(os.devnull, os.O_RDWR)
        # Convert the poscript in jpg files (if authorize)
        if makejpg:
            logger.info("Generate jpeg diagrams")
            for Pdir in P_dir_list:
                os.chdir(Pdir)
                subprocess.call([os.path.join(old_pos, self.dir_path, 'bin', 'internal', 'gen_jpeg-pl')],
                                stdout = devnull)
                os.chdir(os.path.pardir)
        #
        logger.info("Generate web pages")
        # Create the WebPage using perl script

        subprocess.call([os.path.join(old_pos, self.dir_path, 'bin', 'internal', 'gen_cardhtml-pl')], \
                        stdout = devnull)

        os.chdir(os.path.pardir)
        #
        # obj = gen_infohtml.make_info_html(self.dir_path)
        # [mv(name, './HTML/') for name in os.listdir('.') if \
        #     (name.endswith('.html') or name.endswith('.jpg')) and \
        #     name != 'index.html']
        # if online:
        #     nb_channel = obj.rep_rule['nb_gen_diag']
        #     open(os.path.join('./Online'),'w').write(str(nb_channel))

        # Write command history as proc_card_mg5
        if os.path.isdir('Cards'):
            output_file = os.path.join('Cards', 'proc_card_mg5.dat')
            history.write(output_file)

        # Duplicate run_card and FO_analyse_card
        for card in ['run_card', 'FO_analyse_card', 'shower_card']:
            try:
                shutil.copy(pjoin(self.dir_path, 'Cards',
                                  card + '.dat'),
                            pjoin(self.dir_path, 'Cards',
                                  card + '_default.dat'))
            except IOError:
                logger.warning("Failed to copy " + card + ".dat to default")

        subprocess.call([os.path.join(old_pos, self.dir_path, 'bin', 'internal', 'gen_cardhtml-pl')],
                        stdout = devnull)

        # Run "make" to generate madevent.tar.gz file
        if os.path.exists(pjoin('SubProcesses', 'subproc.mg')):
            if os.path.exists('amcatnlo.tar.gz'):
                os.remove('amcatnlo.tar.gz')
            subprocess.call([os.path.join(old_pos, self.dir_path, 'bin', 'internal', 'make_amcatnlo_tar')],
                            stdout = devnull)
        #
        subprocess.call([os.path.join(old_pos, self.dir_path, 'bin', 'internal', 'gen_cardhtml-pl')],
                        stdout = devnull)

        #return to the initial dir
        os.chdir(old_pos)

        # Setup stdHep
        # Find the correct fortran compiler
        base_compiler = ['FC=g77', 'FC=gfortran']

        StdHep_path = pjoin(MG5DIR, 'vendor', 'StdHEP')

        if output_dependencies == 'external':
            # check if stdhep has to be compiled (only the first time)
            if not os.path.exists(pjoin(MG5DIR, 'vendor', 'StdHEP', 'lib', 'libstdhep.a')) or \
               not os.path.exists(pjoin(MG5DIR, 'vendor', 'StdHEP', 'lib', 'libFmcfio.a')):
                # Patch StdHEP's make_opts with the chosen compiler when the
                # user has not forced one through the FC environment variable.
                if 'FC' not in os.environ or not os.environ['FC']:
                    path = os.path.join(StdHep_path, 'src', 'make_opts')
                    text = open(path).read()
                    for base in base_compiler:
                        text = text.replace(base, 'FC=%s' % fcompiler_chosen)
                    open(path, 'w').writelines(text)

                logger.info('Compiling StdHEP. This has to be done only once.')
                misc.compile(cwd = pjoin(MG5DIR, 'vendor', 'StdHEP'))
                logger.info('Done.')
            #then link the libraries in the exported dir
            files.ln(pjoin(StdHep_path, 'lib', 'libstdhep.a'), \
                     pjoin(self.dir_path, 'MCatNLO', 'lib'))
            files.ln(pjoin(StdHep_path, 'lib', 'libFmcfio.a'), \
                     pjoin(self.dir_path, 'MCatNLO', 'lib'))

        elif output_dependencies == 'internal':
            StdHEP_internal_path = pjoin(self.dir_path, 'Source', 'StdHEP')
            shutil.copytree(StdHep_path, StdHEP_internal_path, symlinks=True)
            # Create the links to the lib folder
            linkfiles = ['libstdhep.a', 'libFmcfio.a']
            for file in linkfiles:
                ln(pjoin(os.path.pardir, os.path.pardir, 'Source', 'StdHEP', 'lib', file),
                   os.path.join(self.dir_path, 'MCatNLO', 'lib'))
            if 'FC' not in os.environ or not os.environ['FC']:
                path = pjoin(StdHEP_internal_path, 'src', 'make_opts')
                text = open(path).read()
                for base in base_compiler:
                    text = text.replace(base, 'FC=%s' % fcompiler_chosen)
                open(path, 'w').writelines(text)
            # To avoid compiler version conflicts, we force a clean here
            misc.compile(['clean'], cwd = StdHEP_internal_path)

        elif output_dependencies == 'environment_paths':
            # Here the user chose to define the dependencies path in one of
            # his environmental paths
            libStdHep = misc.which_lib('libstdhep.a')
            libFmcfio = misc.which_lib('libFmcfio.a')
            if not libStdHep is None and not libFmcfio is None:
                logger.info('MG5_aMC is using StdHep installation found at %s.' % \
                            os.path.dirname(libStdHep))
                ln(pjoin(libStdHep), pjoin(self.dir_path, 'MCatNLO', 'lib'), abspath=True)
                ln(pjoin(libFmcfio), pjoin(self.dir_path, 'MCatNLO', 'lib'), abspath=True)
            else:
                raise InvalidCmd("Could not find the location of the files" + \
                                 " libstdhep.a and libFmcfio.a in you environment paths.")

        else:
            raise MadGraph5Error, 'output_dependencies option %s not recognized' \
                                  % output_dependencies

        # Create the default MadAnalysis5 cards
        if 'madanalysis5_path' in self.opt and not \
           self.opt['madanalysis5_path'] is None and not self.proc_defs is None:
            # When using
            processes = sum([me.get('processes') if not isinstance(me, str) else [] \
                             for me in matrix_elements.get('matrix_elements')], [])

            # Try getting the processes from the generation info directly if no ME are
            # available (as it is the case for parallel generation
            if len(processes) == 0:
                processes = self.born_processes
            if len(processes) == 0:
                logger.warning(
"""MG5aMC could not provide to Madanalysis5 the list of processes generated.
As a result, the default card will not be tailored to the process generated.
This typically happens when using the 'low_mem_multicore_nlo_generation' NLO generation mode.""")
            # For now, simply assign all processes to each proc_defs.
            # That shouldn't really affect the default analysis card created by MA5
            self.create_default_madanalysis5_cards(
                history, self.proc_defs, [processes,] * len(self.proc_defs),
                self.opt['madanalysis5_path'], pjoin(self.dir_path, 'Cards'),
                levels = ['hadron'])
930
931 - def write_real_from_born_configs(self, writer, matrix_element, fortran_model):
932 """Writes the real_from_born_configs.inc file that contains 933 the mapping to go for a given born configuration (that is used 934 e.g. in the multi-channel phase-space integration to the 935 corresponding real-emission diagram, i.e. the real emission 936 diagram in which the combined ij is split in i_fks and 937 j_fks.""" 938 lines=[] 939 lines2=[] 940 max_links=0 941 born_me=matrix_element.born_matrix_element 942 for iFKS, conf in enumerate(matrix_element.get_fks_info_list()): 943 iFKS=iFKS+1 944 links=conf['fks_info']['rb_links'] 945 max_links=max(max_links,len(links)) 946 for i,diags in enumerate(links): 947 if not i == diags['born_conf']: 948 print links 949 raise MadGraph5Error, "born_conf should be canonically ordered" 950 real_configs=', '.join(['%d' % int(diags['real_conf']+1) for diags in links]) 951 lines.append("data (real_from_born_conf(irfbc,%d),irfbc=1,%d) /%s/" \ 952 % (iFKS,len(links),real_configs)) 953 954 # this is for 'LOonly' processes; in this case, a fake configuration 955 # with all the born diagrams is written 956 if not matrix_element.get_fks_info_list(): 957 # compute (again) the number of configurations at the born 958 base_diagrams = born_me.get('base_amplitude').get('diagrams') 959 minvert = min([max([len(vert.get('legs')) for vert in \ 960 diag.get('vertices')]) for diag in base_diagrams]) 961 962 for idiag, diag in enumerate(base_diagrams): 963 if any([len(vert.get('legs')) > minvert for vert in 964 diag.get('vertices')]): 965 # Only 3-vertices allowed in configs.inc 966 continue 967 max_links = max_links + 1 968 969 real_configs=', '.join(['%d' % i for i in range(1, max_links+1)]) 970 lines.append("data (real_from_born_conf(irfbc,%d),irfbc=1,%d) /%s/" \ 971 % (1,max_links,real_configs)) 972 973 lines2.append("integer irfbc") 974 lines2.append("integer real_from_born_conf(%d,%d)" \ 975 % (max_links, max(len(matrix_element.get_fks_info_list()),1))) 976 # Write the file 977 writer.writelines(lines2+lines)
978 979 980 #=============================================================================== 981 # write_get_mass_width_file 982 #=============================================================================== 983 #test written
984 - def write_get_mass_width_file(self, writer, makeinc, model):
985 """Write the get_mass_width_file.f file for MG4. 986 Also update the makeinc.inc file 987 """ 988 mass_particles = [p for p in model['particles'] if p['mass'].lower() != 'zero'] 989 width_particles = [p for p in model['particles'] if p['width'].lower() != 'zero'] 990 991 iflines_mass = '' 992 iflines_width = '' 993 994 for i, part in enumerate(mass_particles): 995 if i == 0: 996 ifstring = 'if' 997 else: 998 ifstring = 'else if' 999 if part['self_antipart']: 1000 iflines_mass += '%s (id.eq.%d) then\n' % \ 1001 (ifstring, part.get_pdg_code()) 1002 else: 1003 iflines_mass += '%s (id.eq.%d.or.id.eq.%d) then\n' % \ 1004 (ifstring, part.get_pdg_code(), part.get_anti_pdg_code()) 1005 iflines_mass += 'get_mass_from_id=abs(%s)\n' % part.get('mass') 1006 1007 for i, part in enumerate(width_particles): 1008 if i == 0: 1009 ifstring = 'if' 1010 else: 1011 ifstring = 'else if' 1012 if part['self_antipart']: 1013 iflines_width += '%s (id.eq.%d) then\n' % \ 1014 (ifstring, part.get_pdg_code()) 1015 else: 1016 iflines_width += '%s (id.eq.%d.or.id.eq.%d) then\n' % \ 1017 (ifstring, part.get_pdg_code(), part.get_anti_pdg_code()) 1018 iflines_width += 'get_width_from_id=abs(%s)\n' % part.get('width') 1019 1020 # Make sure it compiles with an if-statement if the above lists are empty 1021 if len(mass_particles)==0: 1022 iflines_mass = 'if (.True.) then\n' 1023 1024 if len(width_particles)==0: 1025 iflines_width = 'if (.True.) then\n' 1026 1027 replace_dict = {'iflines_mass' : iflines_mass, 1028 'iflines_width' : iflines_width} 1029 1030 file = open(os.path.join(_file_path, \ 1031 'iolibs/template_files/get_mass_width_fcts.inc')).read() 1032 file = file % replace_dict 1033 1034 # Write the file 1035 writer.writelines(file) 1036 1037 # update the makeinc 1038 makeinc_content = open(makeinc).read() 1039 makeinc_content = makeinc_content.replace('MODEL = ', 'MODEL = get_mass_width_fcts.o ') 1040 open(makeinc, 'w').write(makeinc_content) 1041 1042 return
1043 1044
1045 - def write_configs_and_props_info_declarations(self, writer, max_iconfig, max_leg_number, nfksconfs, fortran_model):
1046 """writes the declarations for the variables relevant for configs_and_props 1047 """ 1048 lines = [] 1049 lines.append("integer ifr,lmaxconfigs_used,max_branch_used") 1050 lines.append("parameter (lmaxconfigs_used=%4d)" % max_iconfig) 1051 lines.append("parameter (max_branch_used =%4d)" % -max_leg_number) 1052 lines.append("integer mapconfig_d(%3d,0:lmaxconfigs_used)" % nfksconfs) 1053 lines.append("integer iforest_d(%3d,2,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs) 1054 lines.append("integer sprop_d(%3d,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs) 1055 lines.append("integer tprid_d(%3d,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs) 1056 lines.append("double precision pmass_d(%3d,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs) 1057 lines.append("double precision pwidth_d(%3d,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs) 1058 lines.append("integer pow_d(%3d,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs) 1059 1060 writer.writelines(lines)
1061 1062
    def write_configs_and_props_info_file(self, filename, matrix_element):
        """writes the configs_and_props_info.inc file that contains
        all the (real-emission) configurations (IFOREST) as well as
        the masses and widths of intermediate particles.

        Returns (max_iconfig, max_leg_number): the largest configuration
        index written and the most negative internal-leg number, used by
        write_configs_and_props_info_declarations to size the arrays.
        """
        # 'lines' collects the topology records, 'lines2' the propagator
        # mass/width/power records; they are concatenated at the end
        lines = []
        lines.append("# C -> MAPCONFIG_D")
        lines.append("# F/D -> IFOREST_D")
        lines.append("# S -> SPROP_D")
        lines.append("# T -> TPRID_D")
        lines.append("# M -> PMASS_D/PWIDTH_D")
        lines.append("# P -> POW_D")
        lines2 = []
        (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()

        max_iconfig = 0
        # internal legs carry negative numbers, so this is tracked with min()
        max_leg_number = 0

        ########################################################
        # this is for standard processes with [(real=)XXX]
        ########################################################
        for iFKS, conf in enumerate(matrix_element.get_fks_info_list()):
            # FKS configurations are numbered from 1 in the Fortran code
            iFKS = iFKS + 1
            iconfig = 0
            s_and_t_channels = []
            mapconfigs = []
            fks_matrix_element = matrix_element.real_processes[conf['n_me'] - 1].matrix_element
            base_diagrams = fks_matrix_element.get('base_amplitude').get('diagrams')
            model = fks_matrix_element.get('base_amplitude').get('process').get('model')
            # smallest of the per-diagram maximal vertex sizes: diagrams with
            # any larger vertex are skipped below
            minvert = min([max([len(vert.get('legs')) for vert in \
                                diag.get('vertices')]) for diag in base_diagrams])

            lines.append("# ")
            lines.append("# nFKSprocess %d" % iFKS)
            for idiag, diag in enumerate(base_diagrams):
                if any([len(vert.get('legs')) > minvert for vert in
                        diag.get('vertices')]):
                    # Only 3-vertices allowed in configs.inc
                    continue
                iconfig = iconfig + 1
                helas_diag = fks_matrix_element.get('diagrams')[idiag]
                mapconfigs.append(helas_diag.get('number'))
                lines.append("# Diagram %d for nFKSprocess %d" % \
                             (helas_diag.get('number'), iFKS))
                # Correspondance between the config and the amplitudes
                lines.append("C %4d %4d %4d " % (iFKS, iconfig,
                                                 helas_diag.get('number')))

                # Need to reorganize the topology so that we start with all
                # final state external particles and work our way inwards
                schannels, tchannels = helas_diag.get('amplitudes')[0].\
                                       get_s_and_t_channels(ninitial, model, 990)

                s_and_t_channels.append([schannels, tchannels])

                # Write out propagators for s-channel and t-channel vertices
                allchannels = schannels
                if len(tchannels) > 1:
                    # Write out tchannels only if there are any non-trivial ones
                    allchannels = schannels + tchannels

                for vert in allchannels:
                    daughters = [leg.get('number') for leg in vert.get('legs')[:-1]]
                    last_leg = vert.get('legs')[-1]
                    lines.append("F %4d %4d %4d %4d" % \
                                 (iFKS, last_leg.get('number'), iconfig, len(daughters)))
                    for d in daughters:
                        lines.append("D %4d" % d)
                    if vert in schannels:
                        lines.append("S %4d %4d %4d %10d" % \
                                     (iFKS, last_leg.get('number'), iconfig,
                                      last_leg.get('id')))
                    elif vert in tchannels[:-1]:
                        lines.append("T %4d %4d %4d %10d" % \
                                     (iFKS, last_leg.get('number'), iconfig,
                                      abs(last_leg.get('id'))))

                    # update what the array sizes (mapconfig,iforest,etc) will be
                    max_leg_number = min(max_leg_number, last_leg.get('number'))
                    max_iconfig = max(max_iconfig, iconfig)

            # Write out number of configs
            lines.append("# Number of configs for nFKSprocess %d" % iFKS)
            lines.append("C %4d %4d %4d" % (iFKS, 0, iconfig))

            # write the props.inc information
            lines2.append("# ")
            particle_dict = fks_matrix_element.get('processes')[0].get('model').\
                            get('particle_dict')

            for iconf, configs in enumerate(s_and_t_channels):
                # last t-channel entry is dropped: it duplicates an external leg
                for vertex in configs[0] + configs[1][:-1]:
                    leg = vertex.get('legs')[-1]
                    if leg.get('id') not in particle_dict:
                        # Fake propagator used in multiparticle vertices
                        pow_part = 0
                    else:
                        particle = particle_dict[leg.get('id')]

                        # propagator power: 1 for fermions, 2 for bosons
                        pow_part = 1 + int(particle.is_boson())

                    lines2.append("M %4d %4d %4d %10d " % \
                                  (iFKS, leg.get('number'), iconf + 1, leg.get('id')))
                    lines2.append("P %4d %4d %4d %4d " % \
                                  (iFKS, leg.get('number'), iconf + 1, pow_part))

        ########################################################
        # this is for [LOonly=XXX]
        ########################################################
        if not matrix_element.get_fks_info_list():
            born_me = matrix_element.born_matrix_element
            # as usual, in this case we assume just one FKS configuration
            # exists with diagrams corresponding to born ones X the ij -> i,j
            # splitting. Here j is chosen to be the last colored particle in
            # the particle list
            bornproc = born_me.get('processes')[0]
            colors = [l.get('color') for l in bornproc.get('legs')]

            fks_i = len(colors)
            # use the last colored particle if it exists, or
            # just the last
            fks_j = 1
            for cpos, col in enumerate(colors):
                if col != 1:
                    fks_j = cpos + 1
                    fks_j_id = [l.get('id') for l in bornproc.get('legs')][cpos]

            # for the moment, if j is initial-state, we do nothing
            if fks_j > ninitial:
                iFKS = 1
                iconfig = 0
                s_and_t_channels = []
                mapconfigs = []
                base_diagrams = born_me.get('base_amplitude').get('diagrams')
                model = born_me.get('base_amplitude').get('process').get('model')
                minvert = min([max([len(vert.get('legs')) for vert in \
                                    diag.get('vertices')]) for diag in base_diagrams])

                lines.append("# ")
                lines.append("# nFKSprocess %d" % iFKS)
                for idiag, diag in enumerate(base_diagrams):
                    if any([len(vert.get('legs')) > minvert for vert in
                            diag.get('vertices')]):
                        # Only 3-vertices allowed in configs.inc
                        continue
                    iconfig = iconfig + 1
                    helas_diag = born_me.get('diagrams')[idiag]
                    mapconfigs.append(helas_diag.get('number'))
                    lines.append("# Diagram %d for nFKSprocess %d" % \
                                 (helas_diag.get('number'), iFKS))
                    # Correspondance between the config and the amplitudes
                    lines.append("C %4d %4d %4d " % (iFKS, iconfig,
                                                     helas_diag.get('number')))

                    # Need to reorganize the topology so that we start with all
                    # final state external particles and work our way inwards
                    schannels, tchannels = helas_diag.get('amplitudes')[0].\
                                           get_s_and_t_channels(ninitial, model, 990)

                    s_and_t_channels.append([schannels, tchannels])

                    #the first thing to write is the splitting ij -> i,j
                    lines.append("F %4d %4d %4d %4d" % \
                                 (iFKS, -1, iconfig, 2))
                    #(iFKS,last_leg.get('number'), iconfig, len(daughters)))
                    lines.append("D %4d" % nexternal)
                    lines.append("D %4d" % fks_j)
                    lines.append("S %4d %4d %4d %10d" % \
                                 (iFKS, -1, iconfig, fks_j_id))
                    # now we continue with all the other vertices of the diagrams;
                    # we need to shift the 'last_leg' by 1 and replace leg fks_j with -1

                    # Write out propagators for s-channel and t-channel vertices
                    allchannels = schannels
                    if len(tchannels) > 1:
                        # Write out tchannels only if there are any non-trivial ones
                        allchannels = schannels + tchannels

                    for vert in allchannels:
                        daughters = [leg.get('number') for leg in vert.get('legs')[:-1]]
                        last_leg = vert.get('legs')[-1]
                        lines.append("F %4d %4d %4d %4d" % \
                                     (iFKS, last_leg.get('number')-1, iconfig, len(daughters)))

                        # legs with negative number in daughters have to be shifted by -1
                        for i_dau in range(len(daughters)):
                            if daughters[i_dau] < 0:
                                daughters[i_dau] += -1
                        # finally relable fks with -1 if it appears in daughters
                        if fks_j in daughters:
                            daughters[daughters.index(fks_j)] = -1
                        for d in daughters:
                            lines.append("D %4d" % d)
                        if vert in schannels:
                            lines.append("S %4d %4d %4d %10d" % \
                                         (iFKS, last_leg.get('number')-1, iconfig,
                                          last_leg.get('id')))
                        elif vert in tchannels[:-1]:
                            lines.append("T %4d %4d %4d %10d" % \
                                         (iFKS, last_leg.get('number')-1, iconfig,
                                          abs(last_leg.get('id'))))

                        # update what the array sizes (mapconfig,iforest,etc) will be
                        max_leg_number = min(max_leg_number, last_leg.get('number')-1)
                        max_iconfig = max(max_iconfig, iconfig)

                # Write out number of configs
                lines.append("# Number of configs for nFKSprocess %d" % iFKS)
                lines.append("C %4d %4d %4d" % (iFKS, 0, iconfig))

                # write the props.inc information
                lines2.append("# ")
                particle_dict = born_me.get('processes')[0].get('model').\
                                get('particle_dict')

                for iconf, configs in enumerate(s_and_t_channels):
                    # the extra ij -> i,j propagator (leg -1) comes first
                    lines2.append("M %4d %4d %4d %10d " % \
                                  (iFKS, -1, iconf + 1, fks_j_id))
                    pow_part = 1 + int(particle_dict[fks_j_id].is_boson())
                    lines2.append("P %4d %4d %4d %4d " % \
                                  (iFKS, -1, iconf + 1, pow_part))
                    for vertex in configs[0] + configs[1][:-1]:
                        leg = vertex.get('legs')[-1]
                        if leg.get('id') not in particle_dict:
                            # Fake propagator used in multiparticle vertices
                            pow_part = 0
                        else:
                            particle = particle_dict[leg.get('id')]

                            pow_part = 1 + int(particle.is_boson())

                        lines2.append("M %4d %4d %4d %10d " % \
                                      (iFKS, leg.get('number')-1, iconf + 1, leg.get('id')))
                        lines2.append("P %4d %4d %4d %4d " % \
                                      (iFKS, leg.get('number')-1, iconf + 1, pow_part))

        # Write the file
        open(filename, 'w').write('\n'.join(lines + lines2))

        return max_iconfig, max_leg_number
1302 1303
1304 - def write_leshouche_info_declarations(self, writer, nfksconfs, 1305 maxproc, maxflow, nexternal, fortran_model):
1306 """writes the declarations for the variables relevant for leshouche_info 1307 """ 1308 lines = [] 1309 lines.append('integer maxproc_used, maxflow_used') 1310 lines.append('parameter (maxproc_used = %d)' % maxproc) 1311 lines.append('parameter (maxflow_used = %d)' % maxflow) 1312 lines.append('integer idup_d(%d,%d,maxproc_used)' % (nfksconfs, nexternal)) 1313 lines.append('integer mothup_d(%d,%d,%d,maxproc_used)' % (nfksconfs, 2, nexternal)) 1314 lines.append('integer icolup_d(%d,%d,%d,maxflow_used)' % (nfksconfs, 2, nexternal)) 1315 lines.append('integer niprocs_d(%d)' % (nfksconfs)) 1316 1317 writer.writelines(lines)
1318 1319
1320 - def write_genps(self, writer, maxproc,ngraphs,ncolor,maxflow, fortran_model):
1321 """writes the genps.inc file 1322 """ 1323 lines = [] 1324 lines.append("include 'maxparticles.inc'") 1325 lines.append("include 'maxconfigs.inc'") 1326 lines.append("integer maxproc,ngraphs,ncolor,maxflow") 1327 lines.append("parameter (maxproc=%d,ngraphs=%d,ncolor=%d,maxflow=%d)" % \ 1328 (maxproc,ngraphs,ncolor,maxflow)) 1329 writer.writelines(lines)
1330 1331
1332 - def write_leshouche_info_file(self, filename, matrix_element):
1333 """writes the leshouche_info.inc file which contains 1334 the LHA informations for all the real emission processes 1335 """ 1336 lines = [] 1337 lines.append("# I -> IDUP_D") 1338 lines.append("# M -> MOTHUP_D") 1339 lines.append("# C -> ICOLUP_D") 1340 nfksconfs = len(matrix_element.get_fks_info_list()) 1341 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 1342 1343 maxproc = 0 1344 maxflow = 0 1345 for i, conf in enumerate(matrix_element.get_fks_info_list()): 1346 # for i, real in enumerate(matrix_element.real_processes): 1347 (newlines, nprocs, nflows) = self.get_leshouche_lines( 1348 matrix_element.real_processes[conf['n_me'] - 1].matrix_element, i + 1) 1349 lines.extend(newlines) 1350 maxproc = max(maxproc, nprocs) 1351 maxflow = max(maxflow, nflows) 1352 1353 # this is for LOonly 1354 if not matrix_element.get_fks_info_list(): 1355 (newlines, nprocs, nflows) = self.get_leshouche_lines_dummy(matrix_element.born_matrix_element, 1) 1356 lines.extend(newlines) 1357 1358 # Write the file 1359 open(filename,'w').write('\n'.join(lines)) 1360 1361 return nfksconfs, maxproc, maxflow, nexternal
1362 1363
    def write_pdf_wrapper(self, writer, matrix_element, fortran_model):
        """Write the dlum() Fortran wrapper which dispatches, through the
        c_nfksprocess common block, to the parton-luminosity routine
        dlum_<n_me> of the currently selected real-emission process.

        fortran_model is unused here; it is kept for interface uniformity.
        Returns 0.
        """

        file = \
"""double precision function dlum()
implicit none
integer nfksprocess
common/c_nfksprocess/nfksprocess
"""
        if matrix_element.real_processes:
            # one if/else-if branch per FKS configuration
            for n, info in enumerate(matrix_element.get_fks_info_list()):
                file += \
"""if (nfksprocess.eq.%(n)d) then
call dlum_%(n_me)d(dlum)
else""" % {'n': n + 1, 'n_me' : info['n_me']}
            # closing 'else' of the chain: an unknown nfksprocess is fatal
            file += \
"""
write(*,*) 'ERROR: invalid n in dlum :', nfksprocess
stop
endif
return
end
"""
        else:
            # no real-emission processes (LOonly): single born luminosity
            file += \
"""call dlum_0(dlum)
return
end
"""

        # Write the file
        writer.writelines(file)
        return 0
1397 1398
    def write_real_me_wrapper(self, writer, matrix_element, fortran_model):
        """Write the smatrix_real Fortran wrapper which dispatches, through
        the c_nfksprocess common block, to the matrix-element routine
        smatrix_<n_me> of the currently selected real-emission process.

        fortran_model is unused here; it is kept for interface uniformity.
        Returns 0.
        """

        file = \
"""subroutine smatrix_real(p, wgt)
implicit none
include 'nexternal.inc'
double precision p(0:3, nexternal)
double precision wgt
integer nfksprocess
common/c_nfksprocess/nfksprocess
"""
        # one if/else-if branch per FKS configuration (empty loop if none)
        for n, info in enumerate(matrix_element.get_fks_info_list()):
            file += \
"""if (nfksprocess.eq.%(n)d) then
call smatrix_%(n_me)d(p, wgt)
else""" % {'n': n + 1, 'n_me' : info['n_me']}

        if matrix_element.real_processes:
            # closing 'else' of the chain: an unknown nfksprocess is fatal
            file += \
"""
write(*,*) 'ERROR: invalid n in real_matrix :', nfksprocess
stop
endif
return
end
"""
        else:
            # no real-emission processes (LOonly): weight is identically zero
            file += \
"""
wgt=0d0
return
end
"""
        # Write the file
        writer.writelines(file)
        return 0
1436 1437
1438 - def draw_feynman_diagrams(self, matrix_element):
1439 """Create the ps files containing the feynman diagrams for the born process, 1440 as well as for all the real emission processes""" 1441 1442 filename = 'born.ps' 1443 plot = draw.MultiEpsDiagramDrawer(matrix_element.born_matrix_element.\ 1444 get('base_amplitude').get('diagrams'), 1445 filename, 1446 model=matrix_element.born_matrix_element.\ 1447 get('processes')[0].get('model'), 1448 amplitude=True, diagram_type='born') 1449 plot.draw() 1450 1451 for n, fksreal in enumerate(matrix_element.real_processes): 1452 filename = 'matrix_%d.ps' % (n + 1) 1453 plot = draw.MultiEpsDiagramDrawer(fksreal.matrix_element.\ 1454 get('base_amplitude').get('diagrams'), 1455 filename, 1456 model=fksreal.matrix_element.\ 1457 get('processes')[0].get('model'), 1458 amplitude=True, diagram_type='real') 1459 plot.draw()
1460 1461
1462 - def write_real_matrix_elements(self, matrix_element, fortran_model):
1463 """writes the matrix_i.f files which contain the real matrix elements""" 1464 1465 1466 1467 for n, fksreal in enumerate(matrix_element.real_processes): 1468 filename = 'matrix_%d.f' % (n + 1) 1469 self.write_matrix_element_fks(writers.FortranWriter(filename), 1470 fksreal.matrix_element, n + 1, 1471 fortran_model)
1472
1473 - def write_pdf_calls(self, matrix_element, fortran_model):
1474 """writes the parton_lum_i.f files which contain the real matrix elements. 1475 If no real emission existst, write the one for the born""" 1476 1477 if matrix_element.real_processes: 1478 for n, fksreal in enumerate(matrix_element.real_processes): 1479 filename = 'parton_lum_%d.f' % (n + 1) 1480 self.write_pdf_file(writers.FortranWriter(filename), 1481 fksreal.matrix_element, n + 1, 1482 fortran_model) 1483 else: 1484 filename = 'parton_lum_0.f' 1485 self.write_pdf_file(writers.FortranWriter(filename), 1486 matrix_element.born_matrix_element, 0, 1487 fortran_model)
1488 1489
    def generate_born_fks_files(self, matrix_element, fortran_model, me_number, path):
        """generates the files needed for the born amplitude in the P* directory, which will
        be needed by the P* directories.

        All files are written into the current working directory; each
        self.write_* helper below produces one include/source file.
        """
        # NOTE(review): pathdir is assigned but never used below — confirm it
        # can be removed
        pathdir = os.getcwd()

        filename = 'born.f'
        calls_born, ncolor_born = \
            self.write_born_fks(writers.FortranWriter(filename),\
                             matrix_element,
                             fortran_model)

        filename = 'born_hel.f'
        self.write_born_hel(writers.FortranWriter(filename),\
                             matrix_element,
                             fortran_model)


        filename = 'born_conf.inc'
        # s_and_t_channels is reused below for the propagator and
        # subprocess-map files
        nconfigs, mapconfigs, s_and_t_channels = \
            self.write_configs_file(
                    writers.FortranWriter(filename),
                    matrix_element.born_matrix_element,
                    fortran_model)

        filename = 'born_props.inc'
        self.write_props_file(writers.FortranWriter(filename),
                         matrix_element.born_matrix_element,
                         fortran_model,
                            s_and_t_channels)

        filename = 'born_decayBW.inc'
        self.write_decayBW_file(writers.FortranWriter(filename),
                           s_and_t_channels)

        filename = 'born_leshouche.inc'
        nflows = self.write_leshouche_file(writers.FortranWriter(filename),
                             matrix_element.born_matrix_element,
                             fortran_model)

        filename = 'born_nhel.inc'
        self.write_born_nhel_file(writers.FortranWriter(filename),
                           matrix_element.born_matrix_element, nflows,
                           fortran_model,
                           ncolor_born)

        filename = 'born_ngraphs.inc'
        self.write_ngraphs_file(writers.FortranWriter(filename),
                            matrix_element.born_matrix_element.get_number_of_amplitudes())

        filename = 'ncombs.inc'
        self.write_ncombs_file(writers.FortranWriter(filename),
                          matrix_element.born_matrix_element,
                          fortran_model)

        filename = 'born_maxamps.inc'
        maxamps = len(matrix_element.get('diagrams'))
        maxflows = ncolor_born
        # NOTE(review): the comprehension iterates 'me' over
        # born_matrix_element but the body uses 'matrix_element' — looks like
        # a latent bug; confirm the intended quantity before touching it
        self.write_maxamps_file(writers.FortranWriter(filename),
                           maxamps,
                           maxflows,
                           max([len(matrix_element.get('processes')) for me in \
                                matrix_element.born_matrix_element]),1)

        filename = 'config_subproc_map.inc'
        self.write_config_subproc_map_file(writers.FortranWriter(filename),
                                           s_and_t_channels)

        filename = 'coloramps.inc'
        self.write_coloramps_file(writers.FortranWriter(filename),
                             mapconfigs,
                             matrix_element.born_matrix_element,
                             fortran_model)

        #write the sborn_sf.f and the b_sf_files
        # the second entry (empty link list) produces the dummy version
        filename = ['sborn_sf.f', 'sborn_sf_dum.f']
        for i, links in enumerate([matrix_element.color_links, []]):
            self.write_sborn_sf(writers.FortranWriter(filename[i]),
                                                links,
                                                fortran_model)
        self.color_link_files = []
        for i in range(len(matrix_element.color_links)):
            filename = 'b_sf_%3.3d.f' % (i + 1)
            self.color_link_files.append(filename)
            self.write_b_sf_fks(writers.FortranWriter(filename),
                             matrix_element, i,
                             fortran_model)
1576 1577
    def generate_virtuals_from_OLP(self,process_list,export_path, OLP):
        """Generates the library for computing the loop matrix elements
        necessary for this process using the OLP specified.

        Writes the BLHA order file, runs the OLP's generation script, links
        the produced shared library into the export directory, patches
        make_opts accordingly and propagates the process labels from the
        returned contract file into the SubProcesses directories.
        """

        # Start by writing the BLHA order file
        virtual_path = pjoin(export_path,'OLP_virtuals')
        if not os.path.exists(virtual_path):
            os.makedirs(virtual_path)
        filename = os.path.join(virtual_path,'OLE_order.lh')
        self.write_lh_order(filename, process_list, OLP)

        fail_msg='Generation of the virtuals with %s failed.\n'%OLP+\
            'Please check the virt_generation.log file in %s.'\
            %str(pjoin(virtual_path,'virt_generation.log'))

        # Perform some tasks specific to certain OLP's
        if OLP=='GoSam':
            cp(pjoin(self.mgme_dir,'Template','loop_material','OLP_specifics',
                     'GoSam','makevirt'),pjoin(virtual_path,'makevirt'))
            cp(pjoin(self.mgme_dir,'Template','loop_material','OLP_specifics',
                     'GoSam','gosam.rc'),pjoin(virtual_path,'gosam.rc'))
            ln(pjoin(export_path,'Cards','param_card.dat'),virtual_path)
            # Now generate the process
            logger.info('Generating the loop matrix elements with %s...'%OLP)
            virt_generation_log = \
                open(pjoin(virtual_path,'virt_generation.log'), 'w')
            retcode = subprocess.call(['./makevirt'],cwd=virtual_path,
                        stdout=virt_generation_log, stderr=virt_generation_log)
            virt_generation_log.close()
            # Check what extension is used for the share libraries on this system
            possible_other_extensions = ['so','dylib']
            shared_lib_ext='so'
            for ext in possible_other_extensions:
                if os.path.isfile(pjoin(virtual_path,'Virtuals','lib',
                                        'libgolem_olp.'+ext)):
                    shared_lib_ext = ext

            # Now check that everything got correctly generated
            files_to_check = ['olp_module.mod',str(pjoin('lib',
                                             'libgolem_olp.'+shared_lib_ext))]
            if retcode != 0 or any([not os.path.exists(pjoin(virtual_path,
                                    'Virtuals',f)) for f in files_to_check]):
                raise fks_common.FKSProcessError(fail_msg)
            # link the library to the lib folder
            ln(pjoin(virtual_path,'Virtuals','lib','libgolem_olp.'+shared_lib_ext),
               pjoin(export_path,'lib'))

        # Specify in make_opts the right library necessitated by the OLP
        make_opts_content=open(pjoin(export_path,'Source','make_opts')).read()
        make_opts=open(pjoin(export_path,'Source','make_opts'),'w')
        if OLP=='GoSam':
            if platform.system().lower()=='darwin':
                # On mac the -rpath is not supported and the path of the dynamic
                # library is automatically wired in the executable
                make_opts_content=make_opts_content.replace('libOLP=',
                                                    'libOLP=-Wl,-lgolem_olp')
            else:
                # On other platforms the option , -rpath= path to libgolem.so is necessary
                # Using a relative path is not ideal because the file libgolem.so is not
                # copied on the worker nodes.
                # make_opts_content=make_opts_content.replace('libOLP=',
                #              'libOLP=-Wl,-rpath=../$(LIBDIR) -lgolem_olp')
                # Using the absolute path is working in the case where the disk of the
                # front end machine is mounted on all worker nodes as well.
                make_opts_content=make_opts_content.replace('libOLP=',
                    'libOLP=-Wl,-rpath='+str(pjoin(export_path,'lib'))+' -lgolem_olp')


        make_opts.write(make_opts_content)
        make_opts.close()

        # A priori this is generic to all OLP's

        # Parse the contract file returned and propagate the process label to
        # the include of the BinothLHA.f file
        proc_to_label = self.parse_contract_file(
                                           pjoin(virtual_path,'OLE_order.olc'))

        self.write_BinothLHA_inc(process_list,proc_to_label,\
                                 pjoin(export_path,'SubProcesses'))

        # Link the contract file to within the SubProcess directory
        ln(pjoin(virtual_path,'OLE_order.olc'),pjoin(export_path,'SubProcesses'))
1661
1662 - def write_BinothLHA_inc(self, processes, proc_to_label, SubProcPath):
1663 """ Write the file Binoth_proc.inc in each SubProcess directory so as 1664 to provide the right process_label to use in the OLP call to get the 1665 loop matrix element evaluation. The proc_to_label is the dictionary of 1666 the format of the one returned by the function parse_contract_file.""" 1667 1668 for proc in processes: 1669 name = "P%s"%proc.shell_string() 1670 proc_pdgs=(tuple([leg.get('id') for leg in proc.get('legs') if \ 1671 not leg.get('state')]), 1672 tuple([leg.get('id') for leg in proc.get('legs') if \ 1673 leg.get('state')])) 1674 incFile = open(pjoin(SubProcPath, name,'Binoth_proc.inc'),'w') 1675 try: 1676 incFile.write( 1677 """ INTEGER PROC_LABEL 1678 PARAMETER (PROC_LABEL=%d)"""%(proc_to_label[proc_pdgs])) 1679 except KeyError: 1680 raise fks_common.FKSProcessError('Could not found the target'+\ 1681 ' process %s > %s in '%(str(proc_pdgs[0]),str(proc_pdgs[1]))+\ 1682 ' the proc_to_label argument in write_BinothLHA_inc.') 1683 incFile.close()
1684
1685 - def parse_contract_file(self, contract_file_path):
1686 """ Parses the BLHA contract file, make sure all parameters could be 1687 understood by the OLP and return a mapping of the processes (characterized 1688 by the pdg's of the initial and final state particles) to their process 1689 label. The format of the mapping is {((in_pdgs),(out_pdgs)):proc_label}. 1690 """ 1691 1692 proc_def_to_label = {} 1693 1694 if not os.path.exists(contract_file_path): 1695 raise fks_common.FKSProcessError('Could not find the contract file'+\ 1696 ' OLE_order.olc in %s.'%str(contract_file_path)) 1697 1698 comment_re=re.compile(r"^\s*#") 1699 proc_def_re=re.compile( 1700 r"^(?P<in_pdgs>(\s*-?\d+\s*)+)->(?P<out_pdgs>(\s*-?\d+\s*)+)\|"+ 1701 r"\s*(?P<proc_class>\d+)\s*(?P<proc_label>\d+)\s*$") 1702 line_OK_re=re.compile(r"^.*\|\s*OK") 1703 for line in file(contract_file_path): 1704 # Ignore comments 1705 if not comment_re.match(line) is None: 1706 continue 1707 # Check if it is a proc definition line 1708 proc_def = proc_def_re.match(line) 1709 if not proc_def is None: 1710 if int(proc_def.group('proc_class'))!=1: 1711 raise fks_common.FKSProcessError( 1712 'aMCatNLO can only handle loop processes generated by the OLP which have only '+\ 1713 ' process class attribute. Found %s instead in: \n%s'\ 1714 %(proc_def.group('proc_class'),line)) 1715 in_pdgs=tuple([int(in_pdg) for in_pdg in \ 1716 proc_def.group('in_pdgs').split()]) 1717 out_pdgs=tuple([int(out_pdg) for out_pdg in \ 1718 proc_def.group('out_pdgs').split()]) 1719 proc_def_to_label[(in_pdgs,out_pdgs)]=\ 1720 int(proc_def.group('proc_label')) 1721 continue 1722 # For the other types of line, just make sure they end with | OK 1723 if line_OK_re.match(line) is None: 1724 raise fks_common.FKSProcessError( 1725 'The OLP could not process the following line: \n%s'%line) 1726 1727 return proc_def_to_label
1728 1729
    def generate_virt_directory(self, loop_matrix_element, fortran_model, dir_name):
        """writes the V**** directory inside the P**** directories specified in
        dir_name.

        loop_matrix_element: the loop HelasMatrixElement to export
        fortran_model: the helas-call writer passed to write_loop_matrix_element_v4
        dir_name: path of the enclosing P**** SubProcess directory

        Returns the number of helas calls reported by the writers (0 on
        failure to enter the target directory).  NOTE: the function chdir's
        into the new V* directory for the duration of the writing and
        restores the original working directory at the end.
        """

        cwd = os.getcwd()

        matrix_element = loop_matrix_element

        # Create the MadLoop5_resources directory if not already existing
        dirpath = os.path.join(dir_name, 'MadLoop5_resources')
        try:
            os.mkdir(dirpath)
        except os.error as error:
            # Typically "directory already exists": warn and carry on
            logger.warning(error.strerror + " " + dirpath)

        # Create the directory PN_xx_xxxxx in the specified path
        name = "V%s" % matrix_element.get('processes')[0].shell_string()
        dirpath = os.path.join(dir_name, name)

        try:
            os.mkdir(dirpath)
        except os.error as error:
            logger.warning(error.strerror + " " + dirpath)

        try:
            os.chdir(dirpath)
        except os.error:
            logger.error('Could not cd to directory %s' % dirpath)
            return 0

        logger.info('Creating files in directory %s' % name)

        # Extract number of external particles
        (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()

        # NOTE(review): the return value of write_loop_matrix_element_v4 is
        # immediately overwritten by write_bornmatrix below -- confirm the
        # first call is kept only for its file-writing side effects
        calls=self.write_loop_matrix_element_v4(None,matrix_element,fortran_model)
        # The born matrix element, if needed
        filename = 'born_matrix.f'
        calls = self.write_bornmatrix(
            writers.FortranWriter(filename),
            matrix_element,
            fortran_model)

        filename = 'nexternal.inc'
        self.write_nexternal_file(writers.FortranWriter(filename),
                                  nexternal, ninitial)

        filename = 'pmass.inc'
        self.write_pmass_file(writers.FortranWriter(filename),
                              matrix_element)

        filename = 'ngraphs.inc'
        self.write_ngraphs_file(writers.FortranWriter(filename),
                                len(matrix_element.get_all_amplitudes()))

        # Draw (at most 1000) loop diagrams in postscript format
        filename = "loop_matrix.ps"
        plot = draw.MultiEpsDiagramDrawer(base_objects.DiagramList(
            matrix_element.get('base_amplitude').get('loop_diagrams')[:1000]),
            filename,
            model=matrix_element.get('processes')[0].get('model'),
            amplitude='')
        logger.info("Drawing loop Feynman diagrams for " + \
            matrix_element.get('processes')[0].nice_string(print_weighted=False))
        plot.draw()

        # Draw the born diagrams in postscript format
        filename = "born_matrix.ps"
        plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\
            get('born_diagrams'),filename,model=matrix_element.get('processes')[0].\
            get('model'),amplitude='')
        logger.info("Generating born Feynman diagrams for " + \
            matrix_element.get('processes')[0].nice_string(print_weighted=False))
        plot.draw()

        # We also need to write the overall maximum quantities for this group
        # of processes in 'global_specs.inc'. In aMCatNLO, there is always
        # only one process, so this is trivial
        self.write_global_specs(matrix_element, output_path=pjoin(dirpath,'global_specs.inc'))
        open('unique_id.inc','w').write(
"""      integer UNIQUE_ID
      parameter(UNIQUE_ID=1)""")

        linkfiles = ['coupl.inc', 'mp_coupl.inc', 'mp_coupl_same_name.inc',
                     'cts_mprec.h', 'cts_mpc.h', 'MadLoopParamReader.f',
                     'MadLoopCommons.f','MadLoopParams.inc']

        # We should move to MadLoop5_resources directory from the SubProcesses
        ln(pjoin(os.path.pardir,os.path.pardir,'MadLoopParams.dat'),
           pjoin('..','MadLoop5_resources'))

        # Symlink the common include/source files from two levels up
        for file in linkfiles:
            ln('../../%s' % file)

        # HACK: shell symlink instead of the ln() helper used elsewhere
        os.system("ln -s ../../makefile_loop makefile")

        # Multiple-precision module from the lib directory
        linkfiles = ['mpmodule.mod']

        for file in linkfiles:
            ln('../../../lib/%s' % file)

        # Loop-coefficient specifications from DHELAS
        linkfiles = ['coef_specs.inc']

        for file in linkfiles:
            ln('../../../Source/DHELAS/%s' % file)

        # Return to original PWD
        os.chdir(cwd)

        if not calls:
            calls = 0
        return calls
1841 - def get_qed_qcd_orders_from_weighted(self, nexternal, weighted):
1842 """computes the QED/QCD orders from the knowledge of the n of ext particles 1843 and of the weighted orders""" 1844 # n vertices = nexternal - 2 =QED + QCD 1845 # weighted = 2*QED + QCD 1846 QED = weighted - nexternal + 2 1847 QCD = weighted - 2 * QED 1848 return QED, QCD
1849 1850 1851 1852 #=============================================================================== 1853 # write_lh_order 1854 #=============================================================================== 1855 #test written
1856 - def write_lh_order(self, filename, process_list, OLP='MadLoop'):
1857 """Creates the OLE_order.lh file. This function should be edited according 1858 to the OLP which is used. For now it is generic.""" 1859 1860 1861 if len(process_list)==0: 1862 raise fks_common.FKSProcessError('No matrix elements provided to '+\ 1863 'the function write_lh_order.') 1864 return 1865 1866 # We assume the orders to be common to all Subprocesses 1867 1868 orders = process_list[0].get('orders') 1869 if 'QED' in orders.keys() and 'QCD' in orders.keys(): 1870 QED=orders['QED'] 1871 QCD=orders['QCD'] 1872 elif 'QED' in orders.keys(): 1873 QED=orders['QED'] 1874 QCD=0 1875 elif 'QCD' in orders.keys(): 1876 QED=0 1877 QCD=orders['QCD'] 1878 else: 1879 QED, QCD = self.get_qed_qcd_orders_from_weighted(\ 1880 len(process_list[0].get('legs')), 1881 orders['WEIGHTED']) 1882 1883 replace_dict = {} 1884 replace_dict['mesq'] = 'CHaveraged' 1885 replace_dict['corr'] = ' '.join(process_list[0].\ 1886 get('perturbation_couplings')) 1887 replace_dict['irreg'] = 'CDR' 1888 replace_dict['aspow'] = QCD 1889 replace_dict['aepow'] = QED 1890 replace_dict['modelfile'] = './param_card.dat' 1891 replace_dict['params'] = 'alpha_s' 1892 proc_lines=[] 1893 for proc in process_list: 1894 proc_lines.append('%s -> %s' % \ 1895 (' '.join(str(l['id']) for l in proc['legs'] if not l['state']), 1896 ' '.join(str(l['id']) for l in proc['legs'] if l['state']))) 1897 replace_dict['pdgs'] = '\n'.join(proc_lines) 1898 replace_dict['symfin'] = 'Yes' 1899 content = \ 1900 "#OLE_order written by MadGraph5_aMC@NLO\n\ 1901 \n\ 1902 MatrixElementSquareType %(mesq)s\n\ 1903 CorrectionType %(corr)s\n\ 1904 IRregularisation %(irreg)s\n\ 1905 AlphasPower %(aspow)d\n\ 1906 AlphaPower %(aepow)d\n\ 1907 NJetSymmetrizeFinal %(symfin)s\n\ 1908 ModelFile %(modelfile)s\n\ 1909 Parameters %(params)s\n\ 1910 \n\ 1911 # process\n\ 1912 %(pdgs)s\n\ 1913 " % replace_dict 1914 1915 file = open(filename, 'w') 1916 file.write(content) 1917 file.close 1918 return
1919 1920 1921 #=============================================================================== 1922 # write_born_fks 1923 #=============================================================================== 1924 # test written
1925 - def write_born_fks(self, writer, fksborn, fortran_model):
1926 """Export a matrix element to a born.f file in MadFKS format""" 1927 1928 matrix_element = fksborn.born_matrix_element 1929 1930 if not matrix_element.get('processes') or \ 1931 not matrix_element.get('diagrams'): 1932 return 0 1933 1934 if not isinstance(writer, writers.FortranWriter): 1935 raise writers.FortranWriter.FortranWriterError(\ 1936 "writer not FortranWriter") 1937 # Set lowercase/uppercase Fortran code 1938 writers.FortranWriter.downcase = False 1939 1940 replace_dict = {} 1941 1942 # Extract version number and date from VERSION file 1943 info_lines = self.get_mg5_info_lines() 1944 replace_dict['info_lines'] = info_lines 1945 1946 # Extract process info lines 1947 process_lines = self.get_process_info_lines(matrix_element) 1948 replace_dict['process_lines'] = process_lines 1949 1950 1951 # Extract ncomb 1952 ncomb = matrix_element.get_helicity_combinations() 1953 replace_dict['ncomb'] = ncomb 1954 1955 # Extract helicity lines 1956 helicity_lines = self.get_helicity_lines(matrix_element) 1957 replace_dict['helicity_lines'] = helicity_lines 1958 1959 # Extract IC line 1960 ic_line = self.get_ic_line(matrix_element) 1961 replace_dict['ic_line'] = ic_line 1962 1963 # Extract overall denominator 1964 # Averaging initial state color, spin, and identical FS particles 1965 #den_factor_line = get_den_factor_line(matrix_element) 1966 1967 # Extract ngraphs 1968 ngraphs = matrix_element.get_number_of_amplitudes() 1969 replace_dict['ngraphs'] = ngraphs 1970 1971 # Extract nwavefuncs 1972 nwavefuncs = matrix_element.get_number_of_wavefunctions() 1973 replace_dict['nwavefuncs'] = nwavefuncs 1974 1975 # Extract ncolor 1976 ncolor = max(1, len(matrix_element.get('color_basis'))) 1977 replace_dict['ncolor'] = ncolor 1978 1979 # Extract color data lines 1980 color_data_lines = self.get_color_data_lines(matrix_element) 1981 replace_dict['color_data_lines'] = "\n".join(color_data_lines) 1982 1983 # Extract helas calls 1984 helas_calls = 
fortran_model.get_matrix_element_calls(\ 1985 matrix_element) 1986 replace_dict['helas_calls'] = "\n".join(helas_calls) 1987 1988 # Extract amp2 lines 1989 amp2_lines = self.get_amp2_lines(matrix_element) 1990 replace_dict['amp2_lines'] = '\n'.join(amp2_lines) 1991 1992 # Extract JAMP lines 1993 jamp_lines = self.get_JAMP_lines(matrix_element) 1994 replace_dict['jamp_lines'] = '\n'.join(jamp_lines) 1995 1996 # Set the size of Wavefunction 1997 if not self.model or any([p.get('spin') in [4,5] for p in self.model.get('particles') if p]): 1998 replace_dict['wavefunctionsize'] = 20 1999 else: 2000 replace_dict['wavefunctionsize'] = 8 2001 2002 # Extract glu_ij_lines 2003 ij_lines = self.get_ij_lines(fksborn) 2004 replace_dict['ij_lines'] = '\n'.join(ij_lines) 2005 2006 # Extract den_factor_lines 2007 den_factor_lines = self.get_den_factor_lines(fksborn) 2008 replace_dict['den_factor_lines'] = '\n'.join(den_factor_lines) 2009 2010 # Extract the number of FKS process 2011 replace_dict['nconfs'] = max(len(fksborn.get_fks_info_list()),1) 2012 2013 file = open(os.path.join(_file_path, \ 2014 'iolibs/template_files/born_fks.inc')).read() 2015 file = file % replace_dict 2016 2017 # Write the file 2018 writer.writelines(file) 2019 2020 return len(filter(lambda call: call.find('#') != 0, helas_calls)), ncolor
2021 2022
2023 - def write_born_hel(self, writer, fksborn, fortran_model):
2024 """Export a matrix element to a born_hel.f file in MadFKS format""" 2025 2026 matrix_element = fksborn.born_matrix_element 2027 2028 if not matrix_element.get('processes') or \ 2029 not matrix_element.get('diagrams'): 2030 return 0 2031 2032 if not isinstance(writer, writers.FortranWriter): 2033 raise writers.FortranWriter.FortranWriterError(\ 2034 "writer not FortranWriter") 2035 # Set lowercase/uppercase Fortran code 2036 writers.FortranWriter.downcase = False 2037 2038 replace_dict = {} 2039 2040 # Extract version number and date from VERSION file 2041 info_lines = self.get_mg5_info_lines() 2042 replace_dict['info_lines'] = info_lines 2043 2044 # Extract process info lines 2045 process_lines = self.get_process_info_lines(matrix_element) 2046 replace_dict['process_lines'] = process_lines 2047 2048 2049 # Extract ncomb 2050 ncomb = matrix_element.get_helicity_combinations() 2051 replace_dict['ncomb'] = ncomb 2052 2053 # Extract helicity lines 2054 helicity_lines = self.get_helicity_lines(matrix_element) 2055 replace_dict['helicity_lines'] = helicity_lines 2056 2057 # Extract IC line 2058 ic_line = self.get_ic_line(matrix_element) 2059 replace_dict['ic_line'] = ic_line 2060 2061 # Extract overall denominator 2062 # Averaging initial state color, spin, and identical FS particles 2063 #den_factor_line = get_den_factor_line(matrix_element) 2064 2065 # Extract ngraphs 2066 ngraphs = matrix_element.get_number_of_amplitudes() 2067 replace_dict['ngraphs'] = ngraphs 2068 2069 # Extract nwavefuncs 2070 nwavefuncs = matrix_element.get_number_of_wavefunctions() 2071 replace_dict['nwavefuncs'] = nwavefuncs 2072 2073 # Extract ncolor 2074 ncolor = max(1, len(matrix_element.get('color_basis'))) 2075 replace_dict['ncolor'] = ncolor 2076 2077 # Extract color data lines 2078 color_data_lines = self.get_color_data_lines(matrix_element) 2079 replace_dict['color_data_lines'] = "\n".join(color_data_lines) 2080 2081 # Extract amp2 lines 2082 amp2_lines = 
self.get_amp2_lines(matrix_element) 2083 replace_dict['amp2_lines'] = '\n'.join(amp2_lines) 2084 2085 # Extract JAMP lines 2086 jamp_lines = self.get_JAMP_lines(matrix_element) 2087 replace_dict['jamp_lines'] = '\n'.join(jamp_lines) 2088 2089 # Extract den_factor_lines 2090 den_factor_lines = self.get_den_factor_lines(fksborn) 2091 replace_dict['den_factor_lines'] = '\n'.join(den_factor_lines) 2092 2093 # Extract the number of FKS process 2094 replace_dict['nconfs'] = len(fksborn.get_fks_info_list()) 2095 2096 file = open(os.path.join(_file_path, \ 2097 'iolibs/template_files/born_fks_hel.inc')).read() 2098 file = file % replace_dict 2099 2100 # Write the file 2101 writer.writelines(file) 2102 2103 return
2104 2105 2106 #=============================================================================== 2107 # write_born_sf_fks 2108 #=============================================================================== 2109 #test written
2110 - def write_sborn_sf(self, writer, color_links, fortran_model):
2111 """Creates the sborn_sf.f file, containing the calls to the different 2112 color linked borns""" 2113 2114 replace_dict = {} 2115 nborns = len(color_links) 2116 ifkss = [] 2117 iborns = [] 2118 mms = [] 2119 nns = [] 2120 iflines = "\n" 2121 2122 #header for the sborn_sf.f file 2123 file = """subroutine sborn_sf(p_born,m,n,wgt) 2124 implicit none 2125 include "nexternal.inc" 2126 double precision p_born(0:3,nexternal-1),wgt 2127 double complex wgt1(2) 2128 integer m,n \n""" 2129 2130 if nborns > 0: 2131 2132 for i, c_link in enumerate(color_links): 2133 iborn = i+1 2134 2135 iff = {True : 'if', False : 'elseif'}[i==0] 2136 2137 m, n = c_link['link'] 2138 2139 if m != n: 2140 iflines += \ 2141 "c b_sf_%(iborn)3.3d links partons %(m)d and %(n)d \n\ 2142 %(iff)s ((m.eq.%(m)d .and. n.eq.%(n)d).or.(m.eq.%(n)d .and. n.eq.%(m)d)) then \n\ 2143 call sb_sf_%(iborn)3.3d(p_born,wgt)\n\n" \ 2144 %{'m':m, 'n': n, 'iff': iff, 'iborn': iborn} 2145 else: 2146 iflines += \ 2147 "c b_sf_%(iborn)3.3d links partons %(m)d and %(n)d \n\ 2148 %(iff)s (m.eq.%(m)d .and. n.eq.%(n)d) then \n\ 2149 call sb_sf_%(iborn)3.3d(p_born,wgt)\n\n" \ 2150 %{'m':m, 'n': n, 'iff': iff, 'iborn': iborn} 2151 2152 2153 file += iflines + \ 2154 """else 2155 wgt = 0d0 2156 endif 2157 2158 return 2159 end""" 2160 elif nborns == 0: 2161 #write a dummy file 2162 file+=""" 2163 c This is a dummy function because 2164 c this subdir has no soft singularities 2165 wgt = 0d0 2166 2167 return 2168 end""" 2169 # Write the end of the file 2170 2171 writer.writelines(file)
2172 2173 2174 #=============================================================================== 2175 # write_b_sf_fks 2176 #=============================================================================== 2177 #test written
2178 - def write_b_sf_fks(self, writer, fksborn, i, fortran_model):
2179 """Create the b_sf_xxx.f file for the soft linked born in MadFKS format""" 2180 2181 matrix_element = copy.copy(fksborn.born_matrix_element) 2182 2183 if not matrix_element.get('processes') or \ 2184 not matrix_element.get('diagrams'): 2185 return 0 2186 2187 if not isinstance(writer, writers.FortranWriter): 2188 raise writers.FortranWriter.FortranWriterError(\ 2189 "writer not FortranWriter") 2190 # Set lowercase/uppercase Fortran code 2191 writers.FortranWriter.downcase = False 2192 2193 iborn = i + 1 2194 link = fksborn.color_links[i] 2195 2196 replace_dict = {} 2197 2198 replace_dict['iborn'] = iborn 2199 2200 # Extract version number and date from VERSION file 2201 info_lines = self.get_mg5_info_lines() 2202 replace_dict['info_lines'] = info_lines 2203 2204 # Extract process info lines 2205 process_lines = self.get_process_info_lines(matrix_element) 2206 replace_dict['process_lines'] = process_lines + \ 2207 "\nc spectators: %d %d \n" % tuple(link['link']) 2208 2209 # Extract ncomb 2210 ncomb = matrix_element.get_helicity_combinations() 2211 replace_dict['ncomb'] = ncomb 2212 2213 # Extract helicity lines 2214 helicity_lines = self.get_helicity_lines(matrix_element) 2215 replace_dict['helicity_lines'] = helicity_lines 2216 2217 # Extract IC line 2218 ic_line = self.get_ic_line(matrix_element) 2219 replace_dict['ic_line'] = ic_line 2220 2221 # Extract den_factor_lines 2222 den_factor_lines = self.get_den_factor_lines(fksborn) 2223 replace_dict['den_factor_lines'] = '\n'.join(den_factor_lines) 2224 2225 # Extract ngraphs 2226 ngraphs = matrix_element.get_number_of_amplitudes() 2227 replace_dict['ngraphs'] = ngraphs 2228 2229 # Extract nwavefuncs 2230 nwavefuncs = matrix_element.get_number_of_wavefunctions() 2231 replace_dict['nwavefuncs'] = nwavefuncs 2232 2233 # Extract ncolor 2234 ncolor1 = max(1, len(link['orig_basis'])) 2235 replace_dict['ncolor1'] = ncolor1 2236 ncolor2 = max(1, len(link['link_basis'])) 2237 replace_dict['ncolor2'] = ncolor2 2238 2239 
# Extract color data lines 2240 color_data_lines = self.get_color_data_lines_from_color_matrix(\ 2241 link['link_matrix']) 2242 replace_dict['color_data_lines'] = "\n".join(color_data_lines) 2243 2244 # Extract amp2 lines 2245 amp2_lines = self.get_amp2_lines(matrix_element) 2246 replace_dict['amp2_lines'] = '\n'.join(amp2_lines) 2247 2248 # Extract JAMP lines 2249 jamp_lines = self.get_JAMP_lines(matrix_element) 2250 new_jamp_lines = [] 2251 for line in jamp_lines: 2252 line = string.replace(line, 'JAMP', 'JAMP1') 2253 new_jamp_lines.append(line) 2254 replace_dict['jamp1_lines'] = '\n'.join(new_jamp_lines) 2255 2256 matrix_element.set('color_basis', link['link_basis'] ) 2257 jamp_lines = self.get_JAMP_lines(matrix_element) 2258 new_jamp_lines = [] 2259 for line in jamp_lines: 2260 line = string.replace(line, 'JAMP', 'JAMP2') 2261 new_jamp_lines.append(line) 2262 replace_dict['jamp2_lines'] = '\n'.join(new_jamp_lines) 2263 2264 2265 # Extract the number of FKS process 2266 replace_dict['nconfs'] = len(fksborn.get_fks_info_list()) 2267 2268 file = open(os.path.join(_file_path, \ 2269 'iolibs/template_files/b_sf_xxx_fks.inc')).read() 2270 file = file % replace_dict 2271 2272 # Write the file 2273 writer.writelines(file) 2274 2275 return 0 , ncolor1
2276 2277 2278 #=============================================================================== 2279 # write_born_nhel_file 2280 #=============================================================================== 2281 #test written
2282 - def write_born_nhel_file(self, writer, matrix_element, nflows, fortran_model, ncolor):
2283 """Write the born_nhel.inc file for MG4.""" 2284 2285 ncomb = matrix_element.get_helicity_combinations() 2286 file = " integer max_bhel, max_bcol \n" 2287 file = file + "parameter (max_bhel=%d)\nparameter(max_bcol=%d)" % \ 2288 (ncomb, nflows) 2289 2290 # Write the file 2291 writer.writelines(file) 2292 2293 return True
2294 2295 #=============================================================================== 2296 # write_fks_info_file 2297 #===============================================================================
2298 - def write_nfksconfigs_file(self, writer, fksborn, fortran_model):
2299 """Writes the content of nFKSconfigs.inc, which just gives the 2300 total FKS dirs as a parameter. 2301 nFKSconfigs is always >=1 (use a fake configuration for LOonly)""" 2302 replace_dict = {} 2303 replace_dict['nconfs'] = max(len(fksborn.get_fks_info_list()), 1) 2304 content = \ 2305 """ INTEGER FKS_CONFIGS 2306 PARAMETER (FKS_CONFIGS=%(nconfs)d) 2307 2308 """ % replace_dict 2309 2310 writer.writelines(content)
2311 2312 2313 #=============================================================================== 2314 # write_fks_info_file 2315 #===============================================================================
    def write_fks_info_file(self, writer, fksborn, fortran_model): #test_written
        """Writes the content of fks_info.inc, which lists the informations on the
        possible splittings of the born ME.
        nconfs is always >=1 (use a fake configuration for LOonly).
        The fake configuration use an 'antigluon' (id -21, color=8) as i_fks and
        the last colored particle as j_fks.

        writer: a FortranWriter for the output file
        fksborn: FKS object providing the fks info list, the real processes
            and the born matrix element
        fortran_model: unused here

        Returns True.  Raises FortranWriterError if writer is not a
        FortranWriter.
        """

        replace_dict = {}
        fks_info_list = fksborn.get_fks_info_list()
        # At least one configuration, even when there is no real splitting
        replace_dict['nconfs'] = max(len(fks_info_list), 1)

        # this is for processes with 'real' or 'all' as NLO mode
        if len(fks_info_list) > 0:
            # Comma-separated i_fks / j_fks indices, one entry per FKS config
            fks_i_values = ', '.join(['%d' % info['fks_info']['i'] \
                                      for info in fks_info_list])
            fks_j_values = ', '.join(['%d' % info['fks_info']['j'] \
                                      for info in fks_info_list])

            # One Fortran DATA statement per FKS configuration for the
            # color reps, pdg ids, charges, and allowed j_fks-from-i_fks maps
            col_lines = []
            pdg_lines = []
            charge_lines = []
            fks_j_from_i_lines = []
            for i, info in enumerate(fks_info_list):
                # info['n_me'] is the 1-based index of the real process
                col_lines.append( \
                    'DATA (PARTICLE_TYPE_D(%d, IPOS), IPOS=1, NEXTERNAL) / %s /' \
                    % (i + 1, ', '.join('%d' % col for col in fksborn.real_processes[info['n_me']-1].colors) ))
                pdg_lines.append( \
                    'DATA (PDG_TYPE_D(%d, IPOS), IPOS=1, NEXTERNAL) / %s /' \
                    % (i + 1, ', '.join('%d' % pdg for pdg in info['pdgs'])))
                charge_lines.append(\
                    'DATA (PARTICLE_CHARGE_D(%d, IPOS), IPOS=1, NEXTERNAL) / %s /'\
                    % (i + 1, ', '.join('%19.15fd0' % charg\
                                        for charg in fksborn.real_processes[info['n_me']-1].charges) ))
                fks_j_from_i_lines.extend(self.get_fks_j_from_i_lines(fksborn.real_processes[info['n_me']-1],\
                                                                      i + 1))
        else:
            # this is for 'LOonly', generate a fake FKS configuration with
            # - i_fks = nexternal, pdg type = -21 and color =8
            # - j_fks = the last colored particle
            bornproc = fksborn.born_matrix_element.get('processes')[0]
            pdgs = [l.get('id') for l in bornproc.get('legs')] + [-21]
            colors = [l.get('color') for l in bornproc.get('legs')] + [8]
            # Charges play no role for pure-QCD corrections; set them all to 0
            charges = [0.] * len(colors)

            fks_i = len(colors)
            # use the last colored particle if it exists, or
            # just the last
            fks_j=1
            for cpos, col in enumerate(colors[:-1]):
                if col != 1:
                    fks_j = cpos+1

            fks_i_values = str(fks_i)
            fks_j_values = str(fks_j)
            col_lines = ['DATA (PARTICLE_TYPE_D(1, IPOS), IPOS=1, NEXTERNAL) / %s /' \
                         % ', '.join([str(col) for col in colors])]
            pdg_lines = ['DATA (PDG_TYPE_D(1, IPOS), IPOS=1, NEXTERNAL) / %s /' \
                         % ', '.join([str(pdg) for pdg in pdgs])]
            charge_lines = ['DATA (PARTICLE_CHARGE_D(1, IPOS), IPOS=1, NEXTERNAL) / %s /' \
                            % ', '.join('%19.15fd0' % charg for charg in charges)]
            fks_j_from_i_lines = ['DATA (FKS_J_FROM_I_D(1, %d, JPOS), JPOS = 0, 1) / 1, %d /' \
                                  % (fks_i, fks_j)]

        replace_dict['fks_i_line'] = "data fks_i_D / %s /" % fks_i_values
        replace_dict['fks_j_line'] = "data fks_j_D / %s /" % fks_j_values
        replace_dict['col_lines'] = '\n'.join(col_lines)
        replace_dict['pdg_lines'] = '\n'.join(pdg_lines)
        replace_dict['charge_lines'] = '\n'.join(charge_lines)
        replace_dict['fks_j_from_i_lines'] = '\n'.join(fks_j_from_i_lines)

        content = \
""" INTEGER IPOS, JPOS
 INTEGER FKS_I_D(%(nconfs)d), FKS_J_D(%(nconfs)d)
 INTEGER FKS_J_FROM_I_D(%(nconfs)d, NEXTERNAL, 0:NEXTERNAL)
 INTEGER PARTICLE_TYPE_D(%(nconfs)d, NEXTERNAL), PDG_TYPE_D(%(nconfs)d, NEXTERNAL)
 REAL*8 PARTICLE_CHARGE_D(%(nconfs)d, NEXTERNAL)

%(fks_i_line)s
%(fks_j_line)s

%(fks_j_from_i_lines)s

C
C Particle type:
C octet = 8, triplet = 3, singlet = 1
%(col_lines)s

C
C Particle type according to PDG:
C
%(pdg_lines)s

C
C Particle charge:
C charge is set 0. with QCD corrections, which is irrelevant
%(charge_lines)s
""" % replace_dict
        if not isinstance(writer, writers.FortranWriter):
            raise writers.FortranWriter.FortranWriterError(\
                "writer not FortranWriter")
        # Set lowercase/uppercase Fortran code
        writers.FortranWriter.downcase = False

        writer.writelines(content)

        return True
2429 - def write_matrix_element_fks(self, writer, matrix_element, n, fortran_model):
2430 """Export a matrix element to a matrix.f file in MG4 madevent format""" 2431 2432 if not matrix_element.get('processes') or \ 2433 not matrix_element.get('diagrams'): 2434 return 0,0 2435 2436 if not isinstance(writer, writers.FortranWriter): 2437 raise writers.FortranWriter.FortranWriterError(\ 2438 "writer not FortranWriter") 2439 # Set lowercase/uppercase Fortran code 2440 writers.FortranWriter.downcase = False 2441 2442 replace_dict = {} 2443 replace_dict['N_me'] = n 2444 2445 # Extract version number and date from VERSION file 2446 info_lines = self.get_mg5_info_lines() 2447 replace_dict['info_lines'] = info_lines 2448 2449 # Extract process info lines 2450 process_lines = self.get_process_info_lines(matrix_element) 2451 replace_dict['process_lines'] = process_lines 2452 2453 # Extract ncomb 2454 ncomb = matrix_element.get_helicity_combinations() 2455 replace_dict['ncomb'] = ncomb 2456 2457 # Extract helicity lines 2458 helicity_lines = self.get_helicity_lines(matrix_element) 2459 replace_dict['helicity_lines'] = helicity_lines 2460 2461 # Extract IC line 2462 ic_line = self.get_ic_line(matrix_element) 2463 replace_dict['ic_line'] = ic_line 2464 2465 # Extract overall denominator 2466 # Averaging initial state color, spin, and identical FS particles 2467 den_factor_line = self.get_den_factor_line(matrix_element) 2468 replace_dict['den_factor_line'] = den_factor_line 2469 2470 # Extract ngraphs 2471 ngraphs = matrix_element.get_number_of_amplitudes() 2472 replace_dict['ngraphs'] = ngraphs 2473 2474 # Extract ncolor 2475 ncolor = max(1, len(matrix_element.get('color_basis'))) 2476 replace_dict['ncolor'] = ncolor 2477 2478 # Extract color data lines 2479 color_data_lines = self.get_color_data_lines(matrix_element) 2480 replace_dict['color_data_lines'] = "\n".join(color_data_lines) 2481 2482 # Extract helas calls 2483 helas_calls = fortran_model.get_matrix_element_calls(\ 2484 matrix_element) 2485 replace_dict['helas_calls'] = "\n".join(helas_calls) 2486 2487 
# Extract nwavefuncs (important to place after get_matrix_element_calls 2488 # so that 'me_id' is set) 2489 nwavefuncs = matrix_element.get_number_of_wavefunctions() 2490 replace_dict['nwavefuncs'] = nwavefuncs 2491 2492 # Extract amp2 lines 2493 amp2_lines = self.get_amp2_lines(matrix_element) 2494 replace_dict['amp2_lines'] = '\n'.join(amp2_lines) 2495 2496 # Set the size of Wavefunction 2497 if not self.model or any([p.get('spin') in [4,5] for p in self.model.get('particles') if p]): 2498 replace_dict['wavefunctionsize'] = 20 2499 else: 2500 replace_dict['wavefunctionsize'] = 8 2501 2502 # Extract JAMP lines 2503 jamp_lines = self.get_JAMP_lines(matrix_element) 2504 2505 replace_dict['jamp_lines'] = '\n'.join(jamp_lines) 2506 2507 realfile = open(os.path.join(_file_path, \ 2508 'iolibs/template_files/realmatrix_fks.inc')).read() 2509 2510 realfile = realfile % replace_dict 2511 2512 # Write the file 2513 writer.writelines(realfile) 2514 2515 return len(filter(lambda call: call.find('#') != 0, helas_calls)), ncolor
2516 2517 2518 #=============================================================================== 2519 # write_pdf_file 2520 #===============================================================================
2521 - def write_pdf_file(self, writer, matrix_element, n, fortran_model):
2522 #test written 2523 """Write the auto_dsig.f file for MadFKS, which contains 2524 pdf call information""" 2525 2526 if not matrix_element.get('processes') or \ 2527 not matrix_element.get('diagrams'): 2528 return 0 2529 2530 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 2531 2532 if ninitial < 1 or ninitial > 2: 2533 raise writers.FortranWriter.FortranWriterError, \ 2534 """Need ninitial = 1 or 2 to write auto_dsig file""" 2535 2536 replace_dict = {} 2537 2538 replace_dict['N_me'] = n 2539 2540 # Extract version number and date from VERSION file 2541 info_lines = self.get_mg5_info_lines() 2542 replace_dict['info_lines'] = info_lines 2543 2544 # Extract process info lines 2545 process_lines = self.get_process_info_lines(matrix_element) 2546 replace_dict['process_lines'] = process_lines 2547 2548 pdf_vars, pdf_data, pdf_lines = \ 2549 self.get_pdf_lines_mir(matrix_element, ninitial, False, False) 2550 replace_dict['pdf_vars'] = pdf_vars 2551 replace_dict['pdf_data'] = pdf_data 2552 replace_dict['pdf_lines'] = pdf_lines 2553 2554 pdf_vars_mirr, pdf_data_mirr, pdf_lines_mirr = \ 2555 self.get_pdf_lines_mir(matrix_element, ninitial, False, True) 2556 replace_dict['pdf_lines_mirr'] = pdf_lines_mirr 2557 2558 file = open(os.path.join(_file_path, \ 2559 'iolibs/template_files/parton_lum_n_fks.inc')).read() 2560 file = file % replace_dict 2561 2562 # Write the file 2563 writer.writelines(file)
2564 2565 2566 2567 #=============================================================================== 2568 # write_coloramps_file 2569 #=============================================================================== 2570 #test written
2571 - def write_coloramps_file(self, writer, mapconfigs, matrix_element, fortran_model):
2572 """Write the coloramps.inc file for MadEvent""" 2573 2574 lines = [] 2575 lines.append( "logical icolamp(%d,%d,1)" % \ 2576 (max(len(matrix_element.get('color_basis').keys()), 1), 2577 len(mapconfigs))) 2578 2579 lines += self.get_icolamp_lines(mapconfigs, matrix_element, 1) 2580 2581 # Write the file 2582 writer.writelines(lines) 2583 2584 return True
2585 2586 2587 #=============================================================================== 2588 # write_leshouche_file 2589 #=============================================================================== 2590 #test written
    def write_leshouche_file(self, writer, matrix_element, fortran_model):
        """Write the leshouche.inc file for MG4: the Les Houches IDUP, MOTHUP
        and ICOLUP DATA statements for every subprocess of matrix_element.

        Returns the number of color flows written for the first subprocess
        (color connections are emitted only for it).
        NOTE(review): if matrix_element has no processes the final
        'len(color_flow_list)' would raise UnboundLocalError -- presumably
        callers guarantee at least one process; confirm.
        """

        # Extract number of external particles
        (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()

        lines = []
        for iproc, proc in enumerate(matrix_element.get('processes')):
            legs = proc.get_legs_with_decays()
            # PDG ids of all external legs for this subprocess
            lines.append("DATA (IDUP(i,%d),i=1,%d)/%s/" % \
                         (iproc + 1, nexternal,
                          ",".join([str(l.get('id')) for l in legs])))
            # Mothers: initial legs have none (0), final legs point at the
            # two initial ones (1 and 2)
            for i in [1, 2]:
                lines.append("DATA (MOTHUP(%d,i,%3r),i=1,%2r)/%s/" % \
                             (i, iproc + 1, nexternal,
                              ",".join([ "%3r" % 0 ] * ninitial + \
                                       [ "%3r" % i ] * (nexternal - ninitial))))

            # Here goes the color connections corresponding to the JAMPs
            # Only one output, for the first subproc!
            if iproc == 0:
                # If no color basis, just output trivial color flow
                if not matrix_element.get('color_basis'):
                    for i in [1, 2]:
                        lines.append("DATA (ICOLUP(%d,i, 1),i=1,%2r)/%s/" % \
                                     (i, nexternal,
                                      ",".join([ "%3r" % 0 ] * nexternal)))
                    color_flow_list = []

                else:
                    # First build a color representation dictionnary,
                    # flipping the sign of the color rep for initial legs
                    repr_dict = {}
                    for l in legs:
                        repr_dict[l.get('number')] = \
                            proc.get('model').get_particle(l.get('id')).get_color()\
                            * (-1)**(1+l.get('state'))
                    # Get the list of color flows
                    color_flow_list = \
                        matrix_element.get('color_basis').color_flow_decomposition(repr_dict,
                                                                                   ninitial)
                    # And output them properly: one ICOLUP pair (color,
                    # anticolor tags) per flow
                    for cf_i, color_flow_dict in enumerate(color_flow_list):
                        for i in [0, 1]:
                            lines.append("DATA (ICOLUP(%d,i,%3r),i=1,%2r)/%s/" % \
                                         (i + 1, cf_i + 1, nexternal,
                                          ",".join(["%3r" % color_flow_dict[l.get('number')][i] \
                                                    for l in legs])))

        # Write the file
        writer.writelines(lines)

        return len(color_flow_list)
    def write_configs_file(self, writer, matrix_element, fortran_model):
        """Write the configs.inc file for MadEvent.

        Keeps only the diagrams whose largest vertex has the minimal leg
        multiplicity found in the amplitude (i.e. pure lowest-point
        topologies) and writes, for each kept configuration, the
        iforest/sprop/tprid DATA statements used by the phase-space
        integrator.  Returns (iconfig, mapconfigs, s_and_t_channels).
        """

        # Extract number of external particles
        (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
        lines = []

        # Running index of the configurations actually written out
        iconfig = 0

        s_and_t_channels = []
        mapconfigs = []

        model = matrix_element.get('processes')[0].get('model')
#        new_pdg = model.get_first_non_pdg()

        base_diagrams = matrix_element.get('base_amplitude').get('diagrams')
        # NOTE(review): 'model' is immediately rebound here, so the
        # assignment above looks redundant -- kept as-is.
        model = matrix_element.get('base_amplitude').get('process').get('model')
        # Smallest "largest vertex size" over all base diagrams; diagrams
        # containing any bigger vertex are skipped below.
        minvert = min([max([len(vert.get('legs')) for vert in \
                            diag.get('vertices')]) for diag in base_diagrams])

        for idiag, diag in enumerate(base_diagrams):
            if any([len(vert.get('legs')) > minvert for vert in
                    diag.get('vertices')]):
                # Only 3-vertices allowed in configs.inc
                continue
            iconfig = iconfig + 1
            helas_diag = matrix_element.get('diagrams')[idiag]
            mapconfigs.append(helas_diag.get('number'))
            lines.append("# Diagram %d, Amplitude %d" % \
                         (helas_diag.get('number'),helas_diag.get('amplitudes')[0]['number']))
            # Correspondance between the config and the amplitudes
            lines.append("data mapconfig(%4d)/%4d/" % (iconfig,
                                                       helas_diag.get('amplitudes')[0]['number']))

            # Need to reorganize the topology so that we start with all
            # final state external particles and work our way inwards
            # (990 is passed as the starting label for internal
            #  propagators -- presumably chosen to avoid clashes with
            #  external leg numbers; TODO confirm)
            schannels, tchannels = helas_diag.get('amplitudes')[0].\
                                   get_s_and_t_channels(ninitial, model, 990)

            s_and_t_channels.append([schannels, tchannels])

            # Write out propagators for s-channel and t-channel vertices
            allchannels = schannels
            if len(tchannels) > 1:
                # Write out tchannels only if there are any non-trivial ones
                allchannels = schannels + tchannels

            for vert in allchannels:
                daughters = [leg.get('number') for leg in vert.get('legs')[:-1]]
                last_leg = vert.get('legs')[-1]
                lines.append("data (iforest(i,%3d,%4d),i=1,%d)/%s/" % \
                             (last_leg.get('number'), iconfig, len(daughters),
                              ",".join(["%3d" % d for d in daughters])))
                if vert in schannels:
                    lines.append("data sprop(%4d,%4d)/%8d/" % \
                                 (last_leg.get('number'), iconfig,
                                  last_leg.get('id')))
                elif vert in tchannels[:-1]:
                    # t-channel propagators are tagged by |pdg id|
                    lines.append("data tprid(%4d,%4d)/%8d/" % \
                                 (last_leg.get('number'), iconfig,
                                  abs(last_leg.get('id'))))

        # Write out number of configs
        lines.append("# Number of configs")
        lines.append("data mapconfig(0)/%4d/" % iconfig)

        # Write the file
        writer.writelines(lines)

        return iconfig, mapconfigs, s_and_t_channels
2719 2720 2721 #=============================================================================== 2722 # write_decayBW_file 2723 #=============================================================================== 2724 #test written
2725 - def write_decayBW_file(self, writer, s_and_t_channels):
2726 """Write the decayBW.inc file for MadEvent""" 2727 2728 lines = [] 2729 2730 booldict = {False: ".false.", True: ".false."} 2731 ####Changed by MZ 2011-11-23!!!! 2732 2733 for iconf, config in enumerate(s_and_t_channels): 2734 schannels = config[0] 2735 for vertex in schannels: 2736 # For the resulting leg, pick out whether it comes from 2737 # decay or not, as given by the from_group flag 2738 leg = vertex.get('legs')[-1] 2739 lines.append("data gForceBW(%d,%d)/%s/" % \ 2740 (leg.get('number'), iconf + 1, 2741 booldict[leg.get('from_group')])) 2742 2743 # Write the file 2744 writer.writelines(lines) 2745 2746 return True
2747 2748 2749 #=============================================================================== 2750 # write_dname_file 2751 #===============================================================================
2752 - def write_dname_file(self, writer, matrix_element, fortran_model):
2753 """Write the dname.mg file for MG4""" 2754 2755 line = "DIRNAME=P%s" % \ 2756 matrix_element.get('processes')[0].shell_string() 2757 2758 # Write the file 2759 writer.write(line + "\n") 2760 2761 return True
2762 2763 2764 #=============================================================================== 2765 # write_iproc_file 2766 #===============================================================================
2767 - def write_iproc_file(self, writer, me_number):
2768 """Write the iproc.dat file for MG4""" 2769 2770 line = "%d" % (me_number + 1) 2771 2772 # Write the file 2773 for line_to_write in writer.write_line(line): 2774 writer.write(line_to_write) 2775 return True
2776 2777 2778 #=============================================================================== 2779 # Helper functions 2780 #=============================================================================== 2781 2782 2783 #=============================================================================== 2784 # get_fks_j_from_i_lines 2785 #=============================================================================== 2786
    def get_fks_j_from_i_lines(self, me, i = 0): #test written
        """generate the lines for fks.inc describing initializating the
        fks_j_from_i array

        For a regular matrix element, one DATA statement is written per
        entry of me.fks_j_from_i with a non-empty list of j partners; the
        first array index is the caller-supplied matrix-element index i.
        NOTE(review): for a finite ('isfinite') matrix element a single
        hard-coded dummy entry (indices 2/1, one partner '1') is written
        instead -- the if/else pairing is assumed to be on 'isfinite';
        confirm against upstream source.
        """
        lines = []
        if not me.isfinite:
            for ii, js in me.fks_j_from_i.items():
                if js:
                    lines.append('DATA (FKS_J_FROM_I_D(%d, %d, JPOS), JPOS = 0, %d) / %d, %s /' \
                                 % (i, ii, len(js), len(js), ', '.join(["%d" % j for j in js])))
        else:
            # dummy entry for finite processes; note the format drops the
            # first (matrix-element) index compared with the branch above
            lines.append('DATA (FKS_J_FROM_I_D(%d, JPOS), JPOS = 0, %d) / %d, %s /' \
                         % (2, 1, 1, '1'))
        lines.append('')

        return lines


#===============================================================================
# get_leshouche_lines
#===============================================================================
    def get_leshouche_lines(self, matrix_element, ime):
        #test written
        """Write the leshouche.inc file for MG4.

        Builds the Les Houches information ('I' = particle ids, 'M' =
        mother pointers, 'C' = color flow lines) for matrix element number
        ime.  Color flows are written only for the first subprocess.
        Returns (lines, nproc, nflow).
        """

        # Extract number of external particles
        (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()

        lines = []
        for iproc, proc in enumerate(matrix_element.get('processes')):
            legs = proc.get_legs_with_decays()
            lines.append("I %4d %4d %s" % \
                         (ime, iproc + 1,
                          " ".join([str(l.get('id')) for l in legs])))
            # Mothers: initial-state legs have none (0); final-state legs
            # point at beam 1 resp. beam 2
            for i in [1, 2]:
                lines.append("M %4d %4d %4d %s" % \
                             (ime, i, iproc + 1,
                              " ".join([ "%3d" % 0 ] * ninitial + \
                                       [ "%3d" % i ] * (nexternal - ninitial))))

            # Here goes the color connections corresponding to the JAMPs
            # Only one output, for the first subproc!
            if iproc == 0:
                # If no color basis, just output trivial color flow
                if not matrix_element.get('color_basis'):
                    for i in [1, 2]:
                        lines.append("C %4d %4d 1 %s" % \
                                     (ime, i,
                                      " ".join([ "%3d" % 0 ] * nexternal)))
                    color_flow_list = []
                    nflow = 1

                else:
                    # First build a color representation dictionnary
                    # ((-1)**(1+state) flips color to anti-color for
                    # initial-state legs)
                    repr_dict = {}
                    for l in legs:
                        repr_dict[l.get('number')] = \
                            proc.get('model').get_particle(l.get('id')).get_color()\
                            * (-1)**(1+l.get('state'))
                    # Get the list of color flows
                    color_flow_list = \
                        matrix_element.get('color_basis').color_flow_decomposition(repr_dict,
                                                                                   ninitial)
                    # And output them properly
                    for cf_i, color_flow_dict in enumerate(color_flow_list):
                        for i in [0, 1]:
                            lines.append("C %4d %4d %4d %s" % \
                                         (ime, i + 1, cf_i + 1,
                                          " ".join(["%3d" % color_flow_dict[l.get('number')][i] \
                                                    for l in legs])))

                    nflow = len(color_flow_list)

        nproc = len(matrix_element.get('processes'))

        return lines, nproc, nflow
2862 2863
    def get_leshouche_lines_dummy(self, matrix_element, ime):
        #test written
        """As get_leshouche_lines, but for 'fake' real emission processes (LOonly
        In this case, write born color structure times ij -> i,j splitting)

        A fake extra gluon leg (id -21) is appended to the born legs and
        attached, in every color flow, to the last colored born leg
        (fks_j).  Returns (lines, nproc, nflow).
        """

        bornproc = matrix_element.get('processes')[0]
        colors = [l.get('color') for l in bornproc.get('legs')]

        # NOTE(review): fks_i is computed but never used below
        fks_i = len(colors)
        # use the last colored particle if it exists, or
        # just the last
        fks_j=1
        for cpos, col in enumerate(colors):
            if col != 1:
                fks_j = cpos+1

        # Extract number of external particles
        (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
        nexternal+=1 # remember, in this case matrix_element is born

        lines = []
        for iproc, proc in enumerate(matrix_element.get('processes')):
            # add the fake extra leg
            legs = proc.get_legs_with_decays() + \
                   [fks_common.FKSLeg({'id': -21,
                                       'number': nexternal,
                                       'state': True,
                                       'fks': 'i',
                                       'color': 8,
                                       'charge': 0.,
                                       'massless': True,
                                       'spin': 3,
                                       'is_part': True,
                                       'self_antipart': True})]

            lines.append("I %4d %4d %s" % \
                         (ime, iproc + 1,
                          " ".join([str(l.get('id')) for l in legs])))
            for i in [1, 2]:
                lines.append("M %4d %4d %4d %s" % \
                             (ime, i, iproc + 1,
                              " ".join([ "%3d" % 0 ] * ninitial + \
                                       [ "%3d" % i ] * (nexternal - ninitial))))

            # Here goes the color connections corresponding to the JAMPs
            # Only one output, for the first subproc!
            if iproc == 0:
                # If no color basis, just output trivial color flow
                if not matrix_element.get('color_basis'):
                    for i in [1, 2]:
                        lines.append("C %4d %4d 1 %s" % \
                                     (ime, i,
                                      " ".join([ "%3d" % 0 ] * nexternal)))
                    color_flow_list = []
                    nflow = 1

                else:
                    # in this case the last particle (-21) has two color indices
                    # and it has to be emitted by j_fks
                    # First build a color representation dictionnary
                    repr_dict = {}
                    for l in legs[:-1]:
                        repr_dict[l.get('number')] = \
                            proc.get('model').get_particle(l.get('id')).get_color()\
                            * (-1)**(1+l.get('state'))
                    # Get the list of color flows
                    color_flow_list = \
                        matrix_element.get('color_basis').color_flow_decomposition(repr_dict,
                                                                                   ninitial)
                    # And output them properly
                    for cf_i, color_flow_dict in enumerate(color_flow_list):
                        # we have to add the extra leg (-21), linked to the j_fks leg
                        # first, find the maximum color label
                        maxicol = max(sum(color_flow_dict.values(), []))
                        #then, replace the color labels
                        # anti == True: j_fks carries only an anti-color index
                        if color_flow_dict[fks_j][0] == 0:
                            anti = True
                            icol_j = color_flow_dict[fks_j][1]
                        else:
                            anti = False
                            icol_j = color_flow_dict[fks_j][0]

                        # NOTE(review): the new entry is a tuple while the
                        # existing entries are lists; only read by index
                        # below, so this works -- confirm intent upstream
                        if anti:
                            color_flow_dict[nexternal] = (maxicol + 1, color_flow_dict[fks_j][1])
                            color_flow_dict[fks_j][1] = maxicol + 1
                        else:
                            color_flow_dict[nexternal] = (color_flow_dict[fks_j][0], maxicol + 1)
                            color_flow_dict[fks_j][0] = maxicol + 1

                        for i in [0, 1]:
                            lines.append("C %4d %4d %4d %s" % \
                                         (ime, i + 1, cf_i + 1,
                                          " ".join(["%3d" % color_flow_dict[l.get('number')][i] \
                                                    for l in legs])))

                    nflow = len(color_flow_list)

        nproc = len(matrix_element.get('processes'))

        return lines, nproc, nflow
2965 2966 2967 #=============================================================================== 2968 # get_den_factor_lines 2969 #===============================================================================
2970 - def get_den_factor_lines(self, fks_born):
2971 """returns the lines with the information on the denominator keeping care 2972 of the identical particle factors in the various real emissions""" 2973 2974 lines = [] 2975 info_list = fks_born.get_fks_info_list() 2976 if info_list: 2977 # if the reals have been generated, fill with the corresponding average factor 2978 lines.append('INTEGER IDEN_VALUES(%d)' % len(info_list)) 2979 lines.append('DATA IDEN_VALUES /' + \ 2980 ', '.join(['%d' % ( 2981 fks_born.born_matrix_element.get_denominator_factor() ) \ 2982 for info in info_list]) + '/') 2983 else: 2984 # otherwise use the born 2985 lines.append('INTEGER IDEN_VALUES(1)') 2986 lines.append('DATA IDEN_VALUES / %d /' \ 2987 % fks_born.born_matrix_element.get_denominator_factor()) 2988 2989 return lines
2990 2991 2992 #=============================================================================== 2993 # get_ij_lines 2994 #===============================================================================
2995 - def get_ij_lines(self, fks_born):
2996 """returns the lines with the information on the particle number of the born 2997 that splits""" 2998 info_list = fks_born.get_fks_info_list() 2999 lines = [] 3000 if info_list: 3001 # if the reals have been generated, fill with the corresponding value of ij if 3002 # ij is massless, or with 0 if ij is massive (no collinear singularity) 3003 ij_list = [info['fks_info']['ij']if \ 3004 fks_born.born_matrix_element['processes'][0]['legs'][info['fks_info']['ij']-1]['massless'] \ 3005 else 0 for info in info_list] 3006 lines.append('INTEGER IJ_VALUES(%d)' % len(info_list)) 3007 lines.append('DATA IJ_VALUES /' + ', '.join(['%d' % ij for ij in ij_list]) + '/') 3008 else: 3009 #otherwise just put the first leg 3010 lines.append('INTEGER IJ_VALUES(1)') 3011 lines.append('DATA IJ_VALUES / 1 /') 3012 3013 return lines
3014 3015
    def get_pdf_lines_mir(self, matrix_element, ninitial, subproc_group = False,\
                          mirror = False): #test written
        """Generate the PDF lines for the auto_dsig.f file.

        Returns the triple (pdf_definition_lines, pdf_data_lines,
        pdf_lines) of Fortran code fragments.  For decays (ninitial == 1)
        the PDFs are trivially 1; otherwise one PDG2PDF call is emitted
        per distinct initial-state parton and beam.  'mirror' swaps the
        beam assignment, 'subproc_group' wraps beam indices in IB().
        """

        processes = matrix_element.get('processes')
        model = processes[0].get('model')

        pdf_definition_lines = ""
        pdf_data_lines = ""
        pdf_lines = ""

        if ninitial == 1:
            # Decay process: no PDF convolution, every PD entry is 1
            pdf_lines = "PD(0) = 0d0\nIPROC = 0\n"
            for i, proc in enumerate(processes):
                process_line = proc.base_string()
                pdf_lines = pdf_lines + "IPROC=IPROC+1 ! " + process_line
                pdf_lines = pdf_lines + "\nPD(IPROC) = 1d0\n"
                pdf_lines = pdf_lines + "\nPD(0)=PD(0)+PD(IPROC)\n"
        else:
            # Pick out all initial state particles for the two beams
            initial_states = [sorted(list(set([p.get_initial_pdg(1) for \
                                               p in processes]))),
                              sorted(list(set([p.get_initial_pdg(2) for \
                                               p in processes])))]

            # Prepare all variable names
            pdf_codes = dict([(p, model.get_particle(p).get_name()) for p in \
                              sum(initial_states,[])])
            # sanitize particle names into valid Fortran identifiers
            for key,val in pdf_codes.items():
                pdf_codes[key] = val.replace('~','x').replace('+','p').replace('-','m')

            # Set conversion from PDG code to number used in PDF calls
            pdgtopdf = {21: 0, 22: 7}
            # Fill in missing entries of pdgtopdf
            for pdg in sum(initial_states,[]):
                if not pdg in pdgtopdf and not pdg in pdgtopdf.values():
                    pdgtopdf[pdg] = pdg
                elif pdg not in pdgtopdf and pdg in pdgtopdf.values():
                    # If any particle has pdg code 7, we need to use something else
                    pdgtopdf[pdg] = 6000000 + pdg

            # Get PDF variable declarations for all initial states
            for i in [0,1]:
                pdf_definition_lines += "DOUBLE PRECISION " + \
                                        ",".join(["%s%d" % (pdf_codes[pdg],i+1) \
                                                  for pdg in \
                                                  initial_states[i]]) + \
                                        "\n"

            # Get PDF data lines for all initial states
            for i in [0,1]:
                pdf_data_lines += "DATA " + \
                                  ",".join(["%s%d" % (pdf_codes[pdg],i+1) \
                                            for pdg in initial_states[i]]) + \
                                  "/%d*1D0/" % len(initial_states[i]) + \
                                  "\n"

            # Get PDF values for the different initial states
            for i, init_states in enumerate(initial_states):
                # mirror swaps which beam this set of partons belongs to
                if not mirror:
                    ibeam = i + 1
                else:
                    ibeam = 2 - i
                if subproc_group:
                    pdf_lines = pdf_lines + \
                                "IF (ABS(LPP(IB(%d))).GE.1) THEN\nLP=SIGN(1,LPP(IB(%d)))\n" \
                                % (ibeam, ibeam)
                else:
                    pdf_lines = pdf_lines + \
                                "IF (ABS(LPP(%d)) .GE. 1) THEN\nLP=SIGN(1,LPP(%d))\n" \
                                % (ibeam, ibeam)

                for initial_state in init_states:
                    if initial_state in pdf_codes.keys():
                        if subproc_group:
                            # PDF codes beyond 7 are not quark/gluon/photon
                            if abs(pdgtopdf[initial_state]) <= 7:
                                pdf_lines = pdf_lines + \
                                            ("%s%d=PDG2PDF(ABS(LPP(IB(%d))),%d*LP," + \
                                             "XBK(IB(%d)),DSQRT(Q2FACT(%d)))\n") % \
                                            (pdf_codes[initial_state],
                                             i + 1, ibeam, pdgtopdf[initial_state],
                                             ibeam, ibeam)
                            else:
                                # setting other partons flavours outside quark, gluon, photon to be 0d0
                                pdf_lines = pdf_lines + \
                                            ("c settings other partons flavours outside quark, gluon, photon to 0d0\n" + \
                                             "%s%d=0d0\n") % \
                                            (pdf_codes[initial_state],i + 1)
                        else:
                            if abs(pdgtopdf[initial_state]) <= 7:
                                pdf_lines = pdf_lines + \
                                            ("%s%d=PDG2PDF(ABS(LPP(%d)),%d*LP," + \
                                             "XBK(%d),DSQRT(Q2FACT(%d)))\n") % \
                                            (pdf_codes[initial_state],
                                             i + 1, ibeam, pdgtopdf[initial_state],
                                             ibeam, ibeam)
                            else:
                                # setting other partons flavours outside quark, gluon, photon to be 0d0
                                pdf_lines = pdf_lines + \
                                            ("c settings other partons flavours outside quark, gluon, photon to 0d0\n" + \
                                             "%s%d=0d0\n") % \
                                            (pdf_codes[initial_state],i + 1)

                pdf_lines = pdf_lines + "ENDIF\n"

            # Add up PDFs for the different initial state particles
            pdf_lines = pdf_lines + "PD(0) = 0d0\nIPROC = 0\n"
            for proc in processes:
                process_line = proc.base_string()
                pdf_lines = pdf_lines + "IPROC=IPROC+1 ! " + process_line
                pdf_lines = pdf_lines + "\nPD(IPROC) = "
                for ibeam in [1, 2]:
                    initial_state = proc.get_initial_pdg(ibeam)
                    if initial_state in pdf_codes.keys():
                        pdf_lines = pdf_lines + "%s%d*" % \
                                    (pdf_codes[initial_state], ibeam)
                    else:
                        pdf_lines = pdf_lines + "1d0*"
                # Remove last "*" from pdf_lines
                pdf_lines = pdf_lines[:-1] + "\n"

        # Remove last line break from pdf_lines
        return pdf_definition_lines[:-1], pdf_data_lines[:-1], pdf_lines[:-1]


    #test written
3142 - def get_color_data_lines_from_color_matrix(self, color_matrix, n=6):
3143 """Return the color matrix definition lines for the given color_matrix. Split 3144 rows in chunks of size n.""" 3145 3146 if not color_matrix: 3147 return ["DATA Denom(1)/1/", "DATA (CF(i,1),i=1,1) /1/"] 3148 else: 3149 ret_list = [] 3150 my_cs = color.ColorString() 3151 for index, denominator in \ 3152 enumerate(color_matrix.get_line_denominators()): 3153 # First write the common denominator for this color matrix line 3154 ret_list.append("DATA Denom(%i)/%i/" % (index + 1, denominator)) 3155 # Then write the numerators for the matrix elements 3156 num_list = color_matrix.get_line_numerators(index, denominator) 3157 for k in xrange(0, len(num_list), n): 3158 ret_list.append("DATA (CF(i,%3r),i=%3r,%3r) /%s/" % \ 3159 (index + 1, k + 1, min(k + n, len(num_list)), 3160 ','.join(["%5r" % i for i in num_list[k:k + n]]))) 3161 3162 return ret_list
3163 3164 #=========================================================================== 3165 # write_maxamps_file 3166 #===========================================================================
3167 - def write_maxamps_file(self, writer, maxamps, maxflows, 3168 maxproc,maxsproc):
3169 """Write the maxamps.inc file for MG4.""" 3170 3171 file = " integer maxamps, maxflow, maxproc, maxsproc\n" 3172 file = file + "parameter (maxamps=%d, maxflow=%d)\n" % \ 3173 (maxamps, maxflows) 3174 file = file + "parameter (maxproc=%d, maxsproc=%d)" % \ 3175 (maxproc, maxsproc) 3176 3177 # Write the file 3178 writer.writelines(file) 3179 3180 return True
3181 3182 #=============================================================================== 3183 # write_ncombs_file 3184 #===============================================================================
3185 - def write_ncombs_file(self, writer, matrix_element, fortran_model):
3186 # #test written 3187 """Write the ncombs.inc file for MadEvent.""" 3188 3189 # Extract number of external particles 3190 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 3191 3192 # ncomb (used for clustering) is 2^(nexternal) 3193 file = " integer n_max_cl\n" 3194 file = file + "parameter (n_max_cl=%d)" % (2 ** (nexternal+1)) 3195 3196 # Write the file 3197 writer.writelines(file) 3198 3199 return True
3200 3201 #=========================================================================== 3202 # write_config_subproc_map_file 3203 #===========================================================================
3204 - def write_config_subproc_map_file(self, writer, s_and_t_channels):
3205 """Write a dummy config_subproc.inc file for MadEvent""" 3206 3207 lines = [] 3208 3209 for iconfig in range(len(s_and_t_channels)): 3210 lines.append("DATA CONFSUB(1,%d)/1/" % \ 3211 (iconfig + 1)) 3212 3213 # Write the file 3214 writer.writelines(lines) 3215 3216 return True
3217 3218 #=========================================================================== 3219 # write_colors_file 3220 #===========================================================================
    def write_colors_file(self, writer, matrix_element):
        """Write the get_color.f file for MadEvent, which returns color
        for all particles used in the matrix element.

        Collects the pdg codes of every wavefunction and external leg of
        the born and of all real-emission matrix elements, then emits a
        Fortran get_color(ipdg) function covering them plus the dummy
        multiparticle-vertex particle.
        """

        # If there are no real processes, fall back on the born
        try:
            matrix_elements=matrix_element.real_processes[0].matrix_element
        except IndexError:
            matrix_elements=[matrix_element.born_matrix_element]

        if isinstance(matrix_elements, helas_objects.HelasMatrixElement):
            matrix_elements = [matrix_elements]

        model = matrix_elements[0].get('processes')[0].get('model')

        # We need the both particle and antiparticle wf_ids, since the identity
        # depends on the direction of the wf.
        # loop on the real emissions
        wf_ids = set(sum([sum([sum([sum([[wf.get_pdg_code(),wf.get_anti_pdg_code()] \
                                         for wf in d.get('wavefunctions')],[]) \
                                    for d in me.get('diagrams')],[]) \
                               for me in [real_proc.matrix_element]],[])\
                          for real_proc in matrix_element.real_processes],[]))
        # and also on the born
        wf_ids = wf_ids.union(set(sum([sum([[wf.get_pdg_code(),wf.get_anti_pdg_code()] \
                                            for wf in d.get('wavefunctions')],[]) \
                                       for d in matrix_element.born_matrix_element.get('diagrams')],[])))

        # loop on the real emissions
        leg_ids = set(sum([sum([sum([[l.get('id') for l in \
                                      p.get_legs_with_decays()] for p in \
                                     me.get('processes')], []) for me in \
                                [real_proc.matrix_element]], []) for real_proc in \
                           matrix_element.real_processes],[]))
        # and also on the born
        leg_ids = leg_ids.union(set(sum([[l.get('id') for l in \
                                          p.get_legs_with_decays()] for p in \
                                         matrix_element.born_matrix_element.get('processes')], [])))
        particle_ids = sorted(list(wf_ids.union(leg_ids)))

        # First branch of the Fortran if/else-if chain
        lines = """function get_color(ipdg)
        implicit none
        integer get_color, ipdg

        if(ipdg.eq.%d)then
        get_color=%d
        return
        """ % (particle_ids[0], model.get_particle(particle_ids[0]).get_color())

        for part_id in particle_ids[1:]:
            lines += """else if(ipdg.eq.%d)then
        get_color=%d
        return
        """ % (part_id, model.get_particle(part_id).get_color())
        # Dummy particle for multiparticle vertices with pdg given by
        # first code not in the model
        lines += """else if(ipdg.eq.%d)then
c       This is dummy particle used in multiparticle vertices
        get_color=2
        return
        """ % model.get_first_non_pdg()
        lines += """else
        write(*,*)'Error: No color given for pdg ',ipdg
        get_color=0
        return
        endif
        end
        """

        # Write the file
        writer.writelines(lines)

        return True
3293 3294 #=============================================================================== 3295 # write_props_file 3296 #=============================================================================== 3297 #test_written
3298 - def write_props_file(self, writer, matrix_element, fortran_model, s_and_t_channels):
3299 """Write the props.inc file for MadEvent. Needs input from 3300 write_configs_file. With respect to the parent routine, it has some 3301 more specific formats that allow the props.inc file to be read by the 3302 link program""" 3303 3304 lines = [] 3305 3306 particle_dict = matrix_element.get('processes')[0].get('model').\ 3307 get('particle_dict') 3308 3309 for iconf, configs in enumerate(s_and_t_channels): 3310 for vertex in configs[0] + configs[1][:-1]: 3311 leg = vertex.get('legs')[-1] 3312 if leg.get('id') not in particle_dict: 3313 # Fake propagator used in multiparticle vertices 3314 mass = 'zero' 3315 width = 'zero' 3316 pow_part = 0 3317 else: 3318 particle = particle_dict[leg.get('id')] 3319 # Get mass 3320 if particle.get('mass').lower() == 'zero': 3321 mass = particle.get('mass') 3322 else: 3323 mass = "abs(%s)" % particle.get('mass') 3324 # Get width 3325 if particle.get('width').lower() == 'zero': 3326 width = particle.get('width') 3327 else: 3328 width = "abs(%s)" % particle.get('width') 3329 3330 pow_part = 1 + int(particle.is_boson()) 3331 3332 lines.append("pmass(%3d,%4d) = %s" % \ 3333 (leg.get('number'), iconf + 1, mass)) 3334 lines.append("pwidth(%3d,%4d) = %s" % \ 3335 (leg.get('number'), iconf + 1, width)) 3336 lines.append("pow(%3d,%4d) = %d" % \ 3337 (leg.get('number'), iconf + 1, pow_part)) 3338 3339 # Write the file 3340 writer.writelines(lines) 3341 3342 return True
3343 3344 3345 #=========================================================================== 3346 # write_subproc 3347 #===========================================================================
3348 - def write_subproc(self, writer, subprocdir):
3349 """Append this subprocess to the subproc.mg file for MG4""" 3350 3351 # Write line to file 3352 writer.write(subprocdir + "\n") 3353 3354 return True
3355 3356 3357 3358 3359 3360 #================================================================================= 3361 # Class for using the optimized Loop process 3362 #=================================================================================
3363 -class ProcessOptimizedExporterFortranFKS(loop_exporters.LoopProcessOptimizedExporterFortranSA,\ 3364 ProcessExporterFortranFKS):
3365 """Class to take care of exporting a set of matrix elements to 3366 Fortran (v4) format.""" 3367 3368
3369 - def finalize(self, *args, **opts):
3371 #export_v4.ProcessExporterFortranSA.finalize(self, *args, **opts) 3372 3373 #=============================================================================== 3374 # copy the Template in a new directory. 3375 #===============================================================================
    def copy_fkstemplate(self):
        """create the directory run_name as a copy of the MadEvent
        Template, and clean the directory
        For now it is just the same as copy_v4template, but it will be modified

        Steps: copy the NLO + Common templates, optionally clean them,
        link CutTools and the available TIR libraries, generate the TIR
        makefiles, install the MadLoop parameter cards, and link/copy the
        model and CutTools auxiliary files.
        (NOTE(review): relies on names defined at module level -- pjoin,
        logger, ln, banner_mod, MadGraph5Error -- not visible here.)
        """
        mgme_dir = self.mgme_dir
        dir_path = self.dir_path
        clean =self.opt['clean']

        #First copy the full template tree if dir_path doesn't exit
        if not os.path.isdir(dir_path):
            if not mgme_dir:
                raise MadGraph5Error, \
                      "No valid MG_ME path given for MG4 run directory creation."
            logger.info('initialize a new directory: %s' % \
                        os.path.basename(dir_path))
            shutil.copytree(os.path.join(mgme_dir, 'Template', 'NLO'), dir_path, True)
            # distutils.dir_util.copy_tree since dir_path already exists
            dir_util.copy_tree(pjoin(self.mgme_dir, 'Template', 'Common'),
                               dir_path)
            # Copy plot_card
            for card in ['plot_card']:
                if os.path.isfile(pjoin(self.dir_path, 'Cards',card + '.dat')):
                    try:
                        shutil.copy(pjoin(self.dir_path, 'Cards', card + '.dat'),
                                    pjoin(self.dir_path, 'Cards', card + '_default.dat'))
                    except IOError:
                        logger.warning("Failed to copy " + card + ".dat to default")

        elif not os.path.isfile(os.path.join(dir_path, 'TemplateVersion.txt')):
            if not mgme_dir:
                raise MadGraph5Error, \
                      "No valid MG_ME path given for MG4 run directory creation."
            try:
                shutil.copy(os.path.join(mgme_dir, 'MGMEVersion.txt'), dir_path)
            except IOError:
                # fall back on the MG5 version when the MGME one is absent
                MG5_version = misc.get_pkg_info()
                open(os.path.join(dir_path, 'MGMEVersion.txt'), 'w').write( \
                    "5." + MG5_version['version'])

        #Ensure that the Template is clean
        if clean:
            logger.info('remove old information in %s' % os.path.basename(dir_path))
            if os.environ.has_key('MADGRAPH_BASE'):
                subprocess.call([os.path.join('bin', 'internal', 'clean_template'),
                                 '--web'], cwd=dir_path)
            else:
                try:
                    subprocess.call([os.path.join('bin', 'internal', 'clean_template')], \
                                    cwd=dir_path)
                except Exception, why:
                    raise MadGraph5Error('Failed to clean correctly %s: \n %s' \
                                         % (os.path.basename(dir_path),why))
            #Write version info
            MG_version = misc.get_pkg_info()
            open(os.path.join(dir_path, 'SubProcesses', 'MGVersion.txt'), 'w').write(
                MG_version['version'])

        # We must link the CutTools to the Library folder of the active Template
        self.link_CutTools(dir_path)
        # We must link the TIR to the Library folder of the active Template
        link_tir_libs=[]
        tir_libs=[]
        tir_include=[]
        for tir in self.all_tir:
            tir_dir="%s_dir"%tir
            libpath=getattr(self,tir_dir)
            # link_TIR returns "" when the library is unavailable
            libpath = self.link_TIR(os.path.join(self.dir_path, 'lib'),
                                    libpath,"lib%s.a"%tir,tir_name=tir)
            setattr(self,tir_dir,libpath)
            if libpath != "":
                if tir in ['pjfry','ninja','golem', 'samurai','collier']:
                    # We should link dynamically when possible, so we use the original
                    # location of these libraries.
                    link_tir_libs.append('-L%s/ -l%s'%(libpath,tir))
                    tir_libs.append('%s/lib%s.$(libext)'%(libpath,tir))
                    # For Ninja, we must also link against OneLoop.
                    if tir in ['ninja']:
                        if not any(os.path.isfile(pjoin(libpath,'libavh_olo.%s'%ext))
                                   for ext in ['a','dylib','so']):
                            raise MadGraph5Error(
                                "The OneLOop library 'libavh_olo.(a|dylib|so)' could no be found in path '%s'. Please place a symlink to it there."%libpath)
                        link_tir_libs.append('-L%s/ -l%s'%(libpath,'avh_olo'))
                        tir_libs.append('%s/lib%s.$(libext)'%(libpath,'avh_olo'))
                    # We must add the corresponding includes for these TIR
                    if tir in ['golem','samurai','ninja','collier']:
                        trg_path = pjoin(os.path.dirname(libpath),'include')
                        if os.path.isdir(trg_path):
                            to_include = misc.find_includes_path(trg_path,
                                                                 self.include_names[tir])
                        else:
                            to_include = None
                        # Special possible location for collier
                        if to_include is None and tir=='collier':
                            to_include = misc.find_includes_path(
                                pjoin(libpath,'modules'),self.include_names[tir])
                        if to_include is None:
                            logger.error(
                                'Could not find the include directory for %s, looking in %s.\n' % (tir ,str(trg_path))+
                                'Generation carries on but you will need to edit the include path by hand in the makefiles.')
                            to_include = '<Not_found_define_it_yourself>'
                        tir_include.append('-I %s'%to_include)
                else:
                    # static linking against the copy in the Template lib dir
                    link_tir_libs.append('-l%s'%tir)
                    tir_libs.append('$(LIBDIR)lib%s.$(libext)'%tir)

        # Regenerate the TIR makefile in SubProcesses
        os.remove(os.path.join(self.dir_path,'SubProcesses','makefile_loop.inc'))
        cwd = os.getcwd()
        dirpath = os.path.join(self.dir_path, 'SubProcesses')
        try:
            os.chdir(dirpath)
        except os.error:
            logger.error('Could not cd to directory %s' % dirpath)
            return 0
        filename = 'makefile_loop'
        calls = self.write_makefile_TIR(writers.MakefileWriter(filename),
                                        link_tir_libs,tir_libs,tir_include=tir_include)
        # Regenerate make_opts in Source
        os.remove(os.path.join(self.dir_path,'Source','make_opts.inc'))
        dirpath = os.path.join(self.dir_path, 'Source')
        try:
            os.chdir(dirpath)
        except os.error:
            logger.error('Could not cd to directory %s' % dirpath)
            return 0
        filename = 'make_opts'
        calls = self.write_make_opts(writers.MakefileWriter(filename),
                                     link_tir_libs,tir_libs)
        # Return to original PWD
        os.chdir(cwd)

        cwd = os.getcwd()
        dirpath = os.path.join(self.dir_path, 'SubProcesses')
        try:
            os.chdir(dirpath)
        except os.error:
            logger.error('Could not cd to directory %s' % dirpath)
            return 0

        # We add here the user-friendly MadLoop option setter.
        cpfiles= ["SubProcesses/MadLoopParamReader.f",
                  "Cards/MadLoopParams.dat",
                  "SubProcesses/MadLoopParams.inc"]

        for file in cpfiles:
            shutil.copy(os.path.join(self.loop_dir,'StandAlone/', file),
                        os.path.join(self.dir_path, file))

        shutil.copy(pjoin(self.dir_path, 'Cards','MadLoopParams.dat'),
                    pjoin(self.dir_path, 'Cards','MadLoopParams_default.dat'))



        if os.path.exists(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat')):
            self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.dir_path,
                                                  'Cards', 'MadLoopParams.dat'))
            # write the output file
            self.MadLoopparam.write(pjoin(self.dir_path,"SubProcesses",
                                          "MadLoopParams.dat"))

        # We need minimal editing of MadLoopCommons.f
        MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone',
                                          "SubProcesses","MadLoopCommons.inc")).read()
        writer = writers.FortranWriter(os.path.join(self.dir_path,
                                                    "SubProcesses","MadLoopCommons.f"))
        writer.writelines(MadLoopCommon%{
            'print_banner_commands':self.MadLoop_banner},
            context={'collier_available':self.tir_available_dict['collier']})
        writer.close()

        # link the files from the MODEL
        model_path = self.dir_path + '/Source/MODEL/'
        # Note that for the [real=] mode, these files are not present
        if os.path.isfile(os.path.join(model_path,'mp_coupl.inc')):
            ln(model_path + '/mp_coupl.inc', self.dir_path + '/SubProcesses')
        if os.path.isfile(os.path.join(model_path,'mp_coupl_same_name.inc')):
            ln(model_path + '/mp_coupl_same_name.inc', \
               self.dir_path + '/SubProcesses')

        # Write the cts_mpc.h and cts_mprec.h files imported from CutTools
        self.write_mp_files(writers.FortranWriter('cts_mprec.h'),\
                            writers.FortranWriter('cts_mpc.h'),)

        self.copy_python_files()


        # We need to create the correct open_data for the pdf
        self.write_pdf_opendata()


        # Return to original PWD
        os.chdir(cwd)
3567
3568 - def generate_virt_directory(self, loop_matrix_element, fortran_model, dir_name):
3569 """writes the V**** directory inside the P**** directories specified in 3570 dir_name""" 3571 3572 cwd = os.getcwd() 3573 3574 matrix_element = loop_matrix_element 3575 3576 # Create the MadLoop5_resources directory if not already existing 3577 dirpath = os.path.join(dir_name, 'MadLoop5_resources') 3578 try: 3579 os.mkdir(dirpath) 3580 except os.error as error: 3581 logger.warning(error.strerror + " " + dirpath) 3582 3583 # Create the directory PN_xx_xxxxx in the specified path 3584 name = "V%s" % matrix_element.get('processes')[0].shell_string() 3585 dirpath = os.path.join(dir_name, name) 3586 3587 try: 3588 os.mkdir(dirpath) 3589 except os.error as error: 3590 logger.warning(error.strerror + " " + dirpath) 3591 3592 try: 3593 os.chdir(dirpath) 3594 except os.error: 3595 logger.error('Could not cd to directory %s' % dirpath) 3596 return 0 3597 3598 logger.info('Creating files in directory %s' % name) 3599 3600 # Extract number of external particles 3601 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 3602 3603 calls=self.write_loop_matrix_element_v4(None,matrix_element,fortran_model) 3604 3605 # We need a link to coefs.inc from DHELAS 3606 ln(pjoin(self.dir_path, 'Source', 'DHELAS', 'coef_specs.inc'), 3607 abspath=False, cwd=None) 3608 3609 # The born matrix element, if needed 3610 filename = 'born_matrix.f' 3611 calls = self.write_bornmatrix( 3612 writers.FortranWriter(filename), 3613 matrix_element, 3614 fortran_model) 3615 3616 filename = 'nexternal.inc' 3617 self.write_nexternal_file(writers.FortranWriter(filename), 3618 nexternal, ninitial) 3619 3620 filename = 'pmass.inc' 3621 self.write_pmass_file(writers.FortranWriter(filename), 3622 matrix_element) 3623 3624 filename = 'ngraphs.inc' 3625 self.write_ngraphs_file(writers.FortranWriter(filename), 3626 len(matrix_element.get_all_amplitudes())) 3627 3628 filename = "loop_matrix.ps" 3629 writers.FortranWriter(filename).writelines("""C Post-helas generation loop-drawing is not ready yet.""") 
3630 plot = draw.MultiEpsDiagramDrawer(base_objects.DiagramList( 3631 matrix_element.get('base_amplitude').get('loop_diagrams')[:1000]), 3632 filename, 3633 model=matrix_element.get('processes')[0].get('model'), 3634 amplitude='') 3635 logger.info("Drawing loop Feynman diagrams for " + \ 3636 matrix_element.get('processes')[0].nice_string(\ 3637 print_weighted=False)) 3638 plot.draw() 3639 3640 filename = "born_matrix.ps" 3641 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\ 3642 get('born_diagrams'), 3643 filename, 3644 model=matrix_element.get('processes')[0].\ 3645 get('model'), 3646 amplitude='') 3647 logger.info("Generating born Feynman diagrams for " + \ 3648 matrix_element.get('processes')[0].nice_string(\ 3649 print_weighted=False)) 3650 plot.draw() 3651 3652 # We also need to write the overall maximum quantities for this group 3653 # of processes in 'global_specs.inc'. In aMCatNLO, there is always 3654 # only one process, so this is trivial 3655 self.write_global_specs(matrix_element, output_path=pjoin(dirpath,'global_specs.inc')) 3656 3657 open('unique_id.inc','w').write( 3658 """ integer UNIQUE_ID 3659 parameter(UNIQUE_ID=1)""") 3660 3661 linkfiles = ['coupl.inc', 'mp_coupl.inc', 'mp_coupl_same_name.inc', 3662 'cts_mprec.h', 'cts_mpc.h', 'MadLoopParamReader.f', 3663 'MadLoopParams.inc','MadLoopCommons.f'] 3664 3665 for file in linkfiles: 3666 ln('../../%s' % file) 3667 3668 os.system("ln -s ../../makefile_loop makefile") 3669 3670 # We should move to MadLoop5_resources directory from the SubProcesses 3671 ln(pjoin(os.path.pardir,os.path.pardir,'MadLoopParams.dat'), 3672 pjoin('..','MadLoop5_resources')) 3673 3674 linkfiles = ['mpmodule.mod'] 3675 3676 for file in linkfiles: 3677 ln('../../../lib/%s' % file) 3678 3679 linkfiles = ['coef_specs.inc'] 3680 3681 for file in linkfiles: 3682 ln('../../../Source/DHELAS/%s' % file) 3683 3684 # Return to original PWD 3685 os.chdir(cwd) 3686 3687 if not calls: 3688 calls = 0 3689 return calls
3690 3691 3692 #=============================================================================== 3693 # write_coef_specs 3694 #===============================================================================
3695 - def write_coef_specs_file(self, max_loop_vertex_ranks):
3696 """ writes the coef_specs.inc in the DHELAS folder. Should not be called in the 3697 non-optimized mode""" 3698 filename = os.path.join(self.dir_path, 'Source', 'DHELAS', 'coef_specs.inc') 3699 3700 replace_dict = {} 3701 replace_dict['max_lwf_size'] = 4 3702 replace_dict['vertex_max_coefs'] = max(\ 3703 [q_polynomial.get_number_of_coefs_for_rank(n) 3704 for n in max_loop_vertex_ranks]) 3705 IncWriter=writers.FortranWriter(filename,'w') 3706 IncWriter.writelines("""INTEGER MAXLWFSIZE 3707 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d) 3708 INTEGER VERTEXMAXCOEFS 3709 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\ 3710 % replace_dict) 3711 IncWriter.close()
3712