
Source Code for Module madgraph.loop.loop_exporters

   1  ################################################################################ 
   2  # 
   3  # Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors 
   4  # 
   5  # This file is a part of the MadGraph5_aMC@NLO project, an application which  
   6  # automatically generates Feynman diagrams and matrix elements for arbitrary 
   7  # high-energy processes in the Standard Model and beyond. 
   8  # 
   9  # It is subject to the MadGraph5_aMC@NLO license which should accompany this  
  10  # distribution. 
  11  # 
  12  # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch 
  13  # 
  14  ################################################################################ 
  15  """Methods and classes to export matrix elements to v4 format.""" 
  16   
  17  import copy 
  18  import fractions 
  19  import glob 
  20  import logging 
  21  import os 
  22  import stat 
  23  import sys 
  24  import re 
  25  import shutil 
  26  import subprocess 
  27  import itertools 
  28  import time 
  29  import datetime 
  30   
  31   
  32  import aloha 
  33   
  34  import madgraph.core.base_objects as base_objects 
  35  import madgraph.core.color_algebra as color 
  36  import madgraph.core.helas_objects as helas_objects 
  37  import madgraph.loop.loop_helas_objects as loop_helas_objects 
  38  import madgraph.iolibs.drawing_eps as draw 
  39  import madgraph.iolibs.files as files 
  40  import madgraph.iolibs.group_subprocs as group_subprocs 
  41  import madgraph.various.banner as banner_mod 
  42  import madgraph.various.misc as misc 
  43  import madgraph.various.q_polynomial as q_polynomial 
  44  import madgraph.iolibs.file_writers as writers 
  45  import madgraph.iolibs.gen_infohtml as gen_infohtml 
  46  import madgraph.iolibs.template_files as template_files 
  47  import madgraph.iolibs.ufo_expression_parsers as parsers 
  48  import madgraph.iolibs.export_v4 as export_v4 
  49  import madgraph.various.diagram_symmetry as diagram_symmetry 
  50  import madgraph.various.process_checks as process_checks 
  51  import madgraph.various.progressbar as pbar 
  52  import madgraph.various.q_polynomial as q_polynomial 
  53  import madgraph.core.color_amp as color_amp 
  54  import madgraph.iolibs.helas_call_writers as helas_call_writers 
  55  import models.check_param_card as check_param_card 
  56  from madgraph.loop.loop_base_objects import LoopDiagram 
  57  from madgraph.loop.MadLoopBannerStyles import MadLoopBannerStyles 
  58   
  59   
  60   
  61  pjoin = os.path.join 
  62   
  63  import aloha.create_aloha as create_aloha 
  64  import models.write_param_card as param_writer 
  65  from madgraph import MadGraph5Error, MG5DIR, InvalidCmd 
  66  from madgraph.iolibs.files import cp, ln, mv 
  67  pjoin = os.path.join 
  68  _file_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] + '/' 
  69  logger = logging.getLogger('madgraph.loop_exporter') 
  70   
  71  #=============================================================================== 
  72  # LoopExporterFortran 
  73  #=============================================================================== 
  74  class LoopExporterFortran(object): 
  75      """ Class to define general helper functions to the different 
  76      loop fortran exporters (ME, SA, MEGroup, etc..) which will inherit both 
  77      from this class AND from the corresponding ProcessExporterFortran(ME,SA,...). 
  78      It plays the same role as ProcessExporterFortran and simply defines here 
  79      the loop-specific helper functions necessary for all loop exporters. 
  80      Notice that we do not have LoopExporterFortran inheriting from 
  81      ProcessExporterFortran but instead give access to arguments like dir_path and 
  82      clean through options. This avoids method resolution order ambiguity.""" 
  83   
  84      default_opt = dict(export_v4.ProcessExporterFortran.default_opt) 
  85      default_opt.update({'clean': False, 'complex_mass':False, 
  86                          'export_format':'madloop', 'mp':True, 
  87                          'loop_dir':'', 'cuttools_dir':'', 
  88                          'fortran_compiler':'gfortran', 
  89                          'SubProc_prefix': 'P', 
  90                          'output_dependencies': 'external', 
  91                          'compute_color_flows': False, 
  92                          'mode':''}) 
  93   
  94      include_names = {'ninja'  : 'mninja.mod', 
  95                       'golem'  : 'generic_function_1p.mod', 
  96                       'samurai': 'msamurai.mod', 
  97                       'collier': 'collier.mod'} 
  98   
  99      def __init__(self, dir_path = "", opt=None): 
 100          """Initialize the LoopExporterFortran with directory information on where 
 101          to find all the loop-related source files, like CutTools""" 
 102   
 103   
 104          self.opt = dict(self.default_opt) 
 105          if opt: 
 106              self.opt.update(opt) 
 107   
 108          self.SubProc_prefix = self.opt['SubProc_prefix'] 
 109          self.loop_dir = self.opt['loop_dir'] 
 110          self.cuttools_dir = self.opt['cuttools_dir'] 
 111          self.fortran_compiler = self.opt['fortran_compiler'] 
 112          self.dependencies = self.opt['output_dependencies'] 
 113          self.compute_color_flows = self.opt['compute_color_flows'] 
 114   
 115          super(LoopExporterFortran,self).__init__(dir_path, self.opt) 
116 117 191
 192      def get_aloha_model(self, model): 
 193          """ Caches the aloha model created here as an attribute of the loop 
 194          exporter so that it can later be used in the LoopHelasMatrixElement 
 195          in the function compute_all_analytic_information for recycling aloha 
 196          computations across different LoopHelasMatrixElements steered by the 
 197          same loop exporter. 
 198          """ 
 199          if not hasattr(self, 'aloha_model'): 
 200              self.aloha_model = create_aloha.AbstractALOHAModel(os.path.basename(model.get('modelpath'))) 
 201          return self.aloha_model 
 202   
 203      #=========================================================================== 
 204      # write the multiple-precision header files 
 205      #=========================================================================== 
 206      def write_mp_files(self, writer_mprec, writer_mpc): 
 207          """Write the cts_mprec.h and cts_mpc.h""" 
 208   
 209          file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mprec.h')).read() 
 210          writer_mprec.writelines(file) 
 211   
 212          file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mpc.h')).read() 
 213          file = file.replace('&','') 
 214          writer_mpc.writelines(file) 
 215   
 216          return True 
 217   
 218  #=============================================================================== 
 219  # LoopProcessExporterFortranSA 
 220  #=============================================================================== 
 221  class LoopProcessExporterFortranSA(LoopExporterFortran, 
 222                                     export_v4.ProcessExporterFortranSA): 
 223   
 224      """Class to take care of exporting a set of loop matrix elements in the 
 225      Fortran format.""" 
 226   
 227      template_dir=os.path.join(_file_path,'iolibs/template_files/loop') 
 228      madloop_makefile_name = 'makefile' 
 229   
 230      MadLoop_banner = MadLoopBannerStyles.get_MadLoop_Banner( 
 231                          style='classic2', color='green', 
 232                          top_frame_char = '=', bottom_frame_char = '=', 
 233                          left_frame_char = '{',right_frame_char = '}', 
 234                          print_frame=True, side_margin = 7, up_margin = 1) 
 235   
 236      def __init__(self, *args, **opts): 
 237          super(LoopProcessExporterFortranSA,self).__init__(*args,**opts) 
 238          self.unique_id=0 # to allow collier to distinguish the various loop subprocesses 
 239          self.has_loop_induced = False 
240
 241      def copy_template(self, model): 
 242          """Additional actions needed to set up the Template. 
 243          """ 
 244          super(LoopProcessExporterFortranSA, self).copy_template(model) 
 245   
 246          self.loop_additional_template_setup() 
247
 248      def finalize(self, matrix_element, cmdhistory, MG5options, outputflag): 
 249          """create the global information for loops""" 
 250   
 251          super(LoopProcessExporterFortranSA,self).finalize(matrix_element, 
 252                                            cmdhistory, MG5options, outputflag) 
 253   
 254   
 255          MLCard = banner_mod.MadLoopParam(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat')) 
 256          # For loop-induced processes and *only* when summing over all helicity configurations 
 257          # (which is the default for standalone usage), COLLIER is faster than Ninja. 
 258          if self.has_loop_induced: 
 259              MLCard['MLReductionLib'] = "7|6|1" 
 260              # Computing the poles with COLLIER also unnecessarily slows down the code. 
 261              # It should only be set to True for checks, and it is acceptable to disable it 
 262              # here because for loop-induced processes the poles should be zero anyway. 
 263              # We keep it active for non-loop-induced processes because COLLIER is not the 
 264              # main reduction tool in that case, and the poles would not be zero then. 
 265              MLCard['COLLIERComputeUVpoles'] = False 
 266              MLCard['COLLIERComputeIRpoles'] = False 
 267   
 268          MLCard.write(pjoin(self.dir_path, 'Cards', 'MadLoopParams_default.dat')) 
 269          MLCard.write(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat')) 
270
 271      def write_f2py_makefile(self): 
 272          return 
273
 274      def write_f2py_check_sa(self, matrix_element, output_path): 
 275          """ Write the general check_sa.py in SubProcesses that calls all processes successively.""" 
 276   
 277          # No need to further edit this file for now. 
 278          file = open(os.path.join(self.template_dir,\ 
 279                                       'check_sa_all.py.inc')).read() 
 280          open(output_path,'w').writelines(file) 
 281          # Make it executable 
 282          os.chmod(output_path, os.stat(output_path).st_mode | stat.S_IEXEC) 
283 284
 285      def write_f2py_splitter(self): 
286 """write a function to call the correct matrix element""" 287 288 template = """ 289 %(python_information)s 290 291 SUBROUTINE INITIALISE(PATH) 292 C ROUTINE FOR F2PY to read the benchmark point. 293 IMPLICIT NONE 294 CHARACTER*512 PATH 295 CF2PY INTENT(IN) :: PATH 296 CALL SETPARA(PATH) !first call to setup the paramaters 297 RETURN 298 END 299 300 SUBROUTINE SET_MADLOOP_PATH(PATH) 301 C Routine to set the path of the folder 'MadLoop5_resources' to MadLoop 302 CHARACTER(512) PATH 303 CF2PY intent(in)::path 304 CALL SETMADLOOPPATH(PATH) 305 END 306 307 subroutine smatrixhel(pdgs, npdg, p, ALPHAS, SCALES2, nhel, ANS, RETURNCODE) 308 IMPLICIT NONE 309 310 CF2PY double precision, intent(in), dimension(0:3,npdg) :: p 311 CF2PY integer, intent(in), dimension(npdg) :: pdgs 312 CF2PY integer, intent(in) :: npdg 313 CF2PY double precision, intent(out) :: ANS 314 CF2PY integer, intent(out) :: RETURNCODE 315 CF2PY double precision, intent(in) :: ALPHAS 316 CF2PY double precision, intent(in) :: SCALES2 317 318 integer pdgs(*) 319 integer npdg, nhel, RETURNCODE 320 double precision p(*) 321 double precision ANS, ALPHAS, PI,SCALES2 322 323 %(smatrixhel)s 324 325 return 326 end 327 328 subroutine get_pdg_order(OUT) 329 IMPLICIT NONE 330 CF2PY INTEGER, intent(out) :: OUT(%(nb_me)i,%(maxpart)i) 331 332 INTEGER OUT(%(nb_me)i,%(maxpart)i), PDGS(%(nb_me)i,%(maxpart)i) 333 DATA PDGS/ %(pdgs)s / 334 OUT=PDGS 335 RETURN 336 END 337 338 subroutine get_prefix(PREFIX) 339 IMPLICIT NONE 340 CF2PY CHARACTER*20, intent(out) :: PREFIX(%(nb_me)i) 341 character*20 PREFIX(%(nb_me)i),PREF(%(nb_me)i) 342 DATA PREF / '%(prefix)s'/ 343 PREFIX = PREF 344 RETURN 345 END 346 347 """ 348 349 allids = self.prefix_info.keys() 350 allprefix = [self.prefix_info[key][0] for key in allids] 351 min_nexternal = min([len(ids) for ids in allids]) 352 max_nexternal = max([len(ids) for ids in allids]) 353 354 info = [] 355 for key, (prefix, tag) in self.prefix_info.items(): 356 info.append('#PY %s : %s # %s' % (tag, key, prefix)) 357 358 359 text = [] 360 for n_ext in range(min_nexternal, max_nexternal+1): 361 current = [ids for ids in allids if len(ids)==n_ext] 362 if not current: 363 continue 364 if min_nexternal != max_nexternal: 365 if n_ext == min_nexternal: 366 text.append(' if (npdg.eq.%i)then' % n_ext) 367 else: 368 text.append(' else if (npdg.eq.%i)then' % n_ext) 369 for ii,pdgs in enumerate(current): 370 condition = '.and.'.join(['%i.eq.pdgs(%i)' %(pdg, i+1) for i, pdg in enumerate(pdgs)]) 371 if ii==0: 372 text.append( ' if(%s) then ! %i' % (condition, i)) 373 else: 374 text.append( ' else if(%s) then ! %i' % (condition,i)) 375 text.append(' call %sget_me(p, ALPHAS, DSQRT(SCALES2), NHEL, ANS, RETURNCODE)' % self.prefix_info[pdgs][0]) 376 text.append(' endif') 377 #close the function 378 if min_nexternal != max_nexternal: 379 text.append('endif') 380 381 formatting = {'python_information':'\n'.join(info), 382 'smatrixhel': '\n'.join(text), 383 'maxpart': max_nexternal, 384 'nb_me': len(allids), 385 'pdgs': ','.join([str(pdg[i]) if i<len(pdg) else '0' 386 for i in range(max_nexternal) \ 387 for pdg in allids]), 388 'prefix':'\',\''.join(allprefix) 389 } 390 391 392 text = template % formatting 393 fsock = writers.FortranWriter(pjoin(self.dir_path, 'SubProcesses', 'all_matrix.f'),'w') 394 fsock.writelines(text) 395 fsock.close()
396 397 398
 399      def loop_additional_template_setup(self, copy_Source_makefile = True): 
400 """ Perform additional actions specific for this class when setting 401 up the template with the copy_template function.""" 402 403 # We must change some files to their version for NLO computations 404 cpfiles= ["Cards/MadLoopParams.dat", 405 "SubProcesses/MadLoopParamReader.f", 406 "SubProcesses/MadLoopParams.inc"] 407 if copy_Source_makefile: 408 cpfiles.append("Source/makefile") 409 410 for file in cpfiles: 411 shutil.copy(os.path.join(self.loop_dir,'StandAlone/', file), 412 os.path.join(self.dir_path, file)) 413 414 cp(pjoin(self.loop_dir,'StandAlone/Cards/MadLoopParams.dat'), 415 pjoin(self.dir_path, 'Cards/MadLoopParams_default.dat')) 416 417 ln(pjoin(self.dir_path, 'Cards','MadLoopParams.dat'), pjoin(self.dir_path,'SubProcesses')) 418 419 # We might need to give a different name to the MadLoop makefile 420 shutil.copy(pjoin(self.loop_dir,'StandAlone','SubProcesses','makefile'), 421 pjoin(self.dir_path, 'SubProcesses',self.madloop_makefile_name)) 422 423 # Write SubProcesses/MadLoop_makefile_definitions with dummy variables 424 # for the non-optimized output 425 link_tir_libs=[] 426 tir_libs=[] 427 428 filePath = pjoin(self.dir_path, 'SubProcesses', 429 'MadLoop_makefile_definitions') 430 calls = self.write_loop_makefile_definitions( 431 writers.MakefileWriter(filePath),link_tir_libs,tir_libs) 432 433 # We need minimal editing of MadLoopCommons.f 434 # For the optimized output, this file will be overwritten once the 435 # availability of COLLIER has been determined. 436 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone', 437 "SubProcesses","MadLoopCommons.inc")).read() 438 writer = writers.FortranWriter(os.path.join(self.dir_path, 439 "SubProcesses","MadLoopCommons.f")) 440 writer.writelines(MadLoopCommon%{ 441 'print_banner_commands':self.MadLoop_banner}, context={ 442 'collier_available':False}) 443 writer.close() 444 445 # Copy the whole MadLoop5_resources directory (empty at this stage) 446 if not os.path.exists(pjoin(self.dir_path,'SubProcesses', 447 'MadLoop5_resources')): 448 cp(pjoin(self.loop_dir,'StandAlone','SubProcesses', 449 'MadLoop5_resources'),pjoin(self.dir_path,'SubProcesses')) 450 451 # Link relevant cards from Cards inside the MadLoop5_resources 452 ln(pjoin(self.dir_path,'SubProcesses','MadLoopParams.dat'), 453 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 454 ln(pjoin(self.dir_path,'Cards','param_card.dat'), 455 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 456 ln(pjoin(self.dir_path,'Cards','ident_card.dat'), 457 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 458 459 # And remove check_sa in the SubProcess folder since now there is a 460 # check_sa tailored to each subprocess. 461 if os.path.isfile(pjoin(self.dir_path,'SubProcesses','check_sa.f')): 462 os.remove(pjoin(self.dir_path,'SubProcesses','check_sa.f')) 463 464 cwd = os.getcwd() 465 dirpath = os.path.join(self.dir_path, 'SubProcesses') 466 try: 467 os.chdir(dirpath) 468 except os.error: 469 logger.error('Could not cd to directory %s' % dirpath) 470 return 0 471 472 # Write the cts_mpc.h and cts_mprec.h files imported from CutTools 473 self.write_mp_files(writers.FortranWriter('cts_mprec.h'),\ 474 writers.FortranWriter('cts_mpc.h')) 475 476 # Return to original PWD 477 os.chdir(cwd) 478 479 # We must link the CutTools to the Library folder of the active Template 480 super(LoopProcessExporterFortranSA, self).link_CutTools(self.dir_path)
 481   
 482      # This function is placed here and not in the optimized exporter, 
 483      # because the same makefile.inc should be used in all cases. 
 484      def write_loop_makefile_definitions(self, writer, link_tir_libs, 
 485                                          tir_libs,tir_include=[]): 
 486          """ Create the file makefile which links to the TIR libraries.""" 
 487   
 488          file = open(os.path.join(self.loop_dir,'StandAlone', 
 489                      'SubProcesses','MadLoop_makefile_definitions.inc')).read() 
 490          replace_dict={} 
 491          replace_dict['link_tir_libs']=' '.join(link_tir_libs) 
 492          replace_dict['tir_libs']=' '.join(tir_libs) 
 493          replace_dict['dotf']='%.f' 
 494          replace_dict['prefix']= self.SubProc_prefix 
 495          replace_dict['doto']='%.o' 
 496          replace_dict['tir_include']=' '.join(tir_include) 
 497          file=file%replace_dict 
 498          if writer: 
 499              writer.writelines(file) 
 500          else: 
 501              return file 
502
 503      def convert_model(self, model, wanted_lorentz = [], 
 504                                     wanted_couplings = []): 
 505          """ Caches the aloha model created here when writing out the aloha 
 506          fortran subroutine. 
 507          """ 
 508          self.get_aloha_model(model) 
 509          super(LoopProcessExporterFortranSA, self).convert_model(model, 
 510              wanted_lorentz = wanted_lorentz, wanted_couplings = wanted_couplings) 
511
 512      def get_ME_identifier(self, matrix_element, 
 513                        group_number = None, group_elem_number = None): 
 514          """ A function returning a string uniquely identifying the matrix 
 515          element given in argument so that it can be used as a prefix to all 
 516          MadLoop5 subroutines and common blocks related to it. This allows 
 517          several processes to be compiled into one library, as requested by the 
 518          BLHA (Binoth Les Houches Accord) guidelines. 
 519          The arguments group_number and group_elem_number are only used for the 
 520          LoopInduced output with MadEvent.""" 
 521   
 522          # When disabling the loop grouping in the LoopInduced MadEvent output, 
 523          # only group_number is set and group_elem_number is None. In this 
 524          # case we don't print the group_elem_number. 
 525          if (not group_number is None) and group_elem_number is None: 
 526              return 'ML5_%d_%s_'%(matrix_element.get('processes')[0].get('id'), 
 527                                                                  group_number) 
 528          elif group_number is None or group_elem_number is None: 
 529              return 'ML5_%d_'%matrix_element.get('processes')[0].get('id') 
 530          else: 
 531              return 'ML5_%d_%s_%s_'%(matrix_element.get('processes')[0].get('id'), 
 532                                             group_number, group_elem_number) 
533
 534      def get_SubProc_folder_name(self, process, 
 535                        group_number = None, group_elem_number = None): 
 536          """Returns the name of the SubProcess directory, which can contain 
 537          the process group and group element number for the case of loop-induced 
 538          integration with MadEvent.""" 
 539   
 540          # When disabling the loop grouping in the LoopInduced MadEvent output, 
 541          # only group_number is set and group_elem_number is None. In this 
 542          # case we don't print the group_elem_number. 
 543          if not group_number is None and group_elem_number is None: 
 544              return "%s%d_%s_%s"%(self.SubProc_prefix, process.get('id'), 
 545                              group_number,process.shell_string(print_id=False)) 
 546          elif group_number is None or group_elem_number is None: 
 547              return "%s%s" %(self.SubProc_prefix,process.shell_string()) 
 548          else: 
 549              return "%s%d_%s_%s_%s"%(self.SubProc_prefix, process.get('id'), 
 550                  group_number, group_elem_number,process.shell_string(print_id=False)) 
 551   
 552      #=========================================================================== 
 553      # Set the compiler to be gfortran for the loop processes. 
 554      #=========================================================================== 
 555      def compiler_choice(self, compiler=export_v4.default_compiler): 
 556          """ Different daughter classes might want different compilers. 
 557          Here, the gfortran compiler is used throughout the compilation 
 558          (mandatory for CutTools written in f90) """ 
 559          if isinstance(compiler, str): 
 560              fortran_compiler = compiler 
 561              compiler = export_v4.default_compiler 
 562              compiler['fortran'] = fortran_compiler 
 563   
 564          if not compiler['fortran'] is None and not \ 
 565                   any([name in compiler['fortran'] for name in \ 
 566                                                        ['gfortran','ifort']]): 
 567              logger.info('For loop processes, the compiler must be fortran90 '+\ 
 568                          'compatible, like gfortran.') 
 569              compiler['fortran'] = 'gfortran' 
 570              self.set_compiler(compiler,True) 
 571          else: 
 572              self.set_compiler(compiler) 
 573   
 574          self.set_cpp_compiler(compiler['cpp']) 
575
 576      def turn_to_mp_calls(self, helas_calls_list): 
 577          # Prepend 'MP_' to all the helas calls in helas_calls_list. 
 578          # This might look like a brutal, unsafe implementation, but it is not, as 
 579          # these calls are built from the properties of the HELAS objects, and 
 580          # whether they are evaluated in double or quad precision is none of 
 581          # their business but only relevant to the output algorithm. 
 582          # Also the cast to complex masses DCMPLX(*) must be replaced by 
 583          # CMPLX(*,KIND=16). 
 584          MP=re.compile(r"(?P<toSub>^.*CALL\s+)",re.IGNORECASE | re.MULTILINE) 
 585   
 586          def replaceWith(match_obj): 
 587              return match_obj.group('toSub')+'MP_' 
 588   
 589          DCMPLX=re.compile(r"DCMPLX\((?P<toSub>([^\)]*))\)",\ 
 590                            re.IGNORECASE | re.MULTILINE) 
 591   
 592          for i, helas_call in enumerate(helas_calls_list): 
 593              new_helas_call=MP.sub(replaceWith,helas_call) 
 594              helas_calls_list[i]=DCMPLX.sub(r"CMPLX(\g<toSub>,KIND=16)",\ 
 595                                             new_helas_call) 
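# Illustrative sketch (not part of the module) of the substitutions performed
# by turn_to_mp_calls above on a single call string; the HELAS call below is a
# hypothetical example, only its shape matters.
import re
MP = re.compile(r"(?P<toSub>^.*CALL\s+)", re.IGNORECASE | re.MULTILINE)
DCMPLX = re.compile(r"DCMPLX\((?P<toSub>([^\)]*))\)", re.IGNORECASE | re.MULTILINE)
call = "CALL FFV1_0(W(1,1),W(1,2),W(1,3),DCMPLX(GC_11),AMP(1))"
call = MP.sub(lambda m: m.group('toSub') + 'MP_', call)
call = DCMPLX.sub(r"CMPLX(\g<toSub>,KIND=16)", call)
# call is now 'CALL MP_FFV1_0(W(1,1),W(1,2),W(1,3),CMPLX(GC_11,KIND=16),AMP(1))'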
596 600 608
 609      def make(self): 
610 """ Compiles the additional dependences for loop (such as CutTools).""" 611 super(LoopProcessExporterFortranSA, self).make() 612 613 # make CutTools (only necessary with MG option output_dependencies='internal') 614 libdir = os.path.join(self.dir_path,'lib') 615 sourcedir = os.path.join(self.dir_path,'Source') 616 if self.dependencies=='internal': 617 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 618 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 619 if os.path.exists(pjoin(sourcedir,'CutTools')): 620 logger.info('Compiling CutTools (can take a couple of minutes) ...') 621 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 622 logger.info(' ...done.') 623 else: 624 raise MadGraph5Error('Could not compile CutTools because its'+\ 625 ' source directory could not be found in the SOURCE folder.') 626 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 627 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 628 raise MadGraph5Error('CutTools compilation failed.') 629 630 # Verify compatibility between current compiler and the one which was 631 # used when last compiling CutTools (if specified). 632 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 633 libdir, 'libcts.a')))),'compiler_version.log') 634 if os.path.exists(compiler_log_path): 635 compiler_version_used = open(compiler_log_path,'r').read() 636 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 637 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 638 if os.path.exists(pjoin(sourcedir,'CutTools')): 639 logger.info('CutTools was compiled with a different fortran'+\ 640 ' compiler. Re-compiling it now...') 641 misc.compile(['cleanCT'], cwd = sourcedir) 642 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 643 logger.info(' ...done.') 644 else: 645 raise MadGraph5Error("CutTools installation in %s"\ 646 %os.path.realpath(pjoin(libdir, 'libcts.a'))+\ 647 " seems to have been compiled with a different compiler than"+\ 648 " the one specified in MG5_aMC. Please recompile CutTools.")
649
 650      def cat_coeff(self, ff_number, frac, is_imaginary, Nc_power, Nc_value=3): 
 651          """Concatenate the coefficient information to reduce it to 
 652          (fraction, is_imaginary) """ 
 653   
 654          total_coeff = ff_number * frac * fractions.Fraction(Nc_value) ** Nc_power 
 655   
 656          return (total_coeff, is_imaginary) 
657
 658      def get_amp_to_jamp_map(self, col_amps, n_amps): 
 659          """ Returns a list whose element 'i' is a list of tuples, one for each 
 660          appearance of amplitude number 'i' in jamp number 'j' 
 661          with coefficient 'coeff_j'. The format of each tuple describing an 
 662          appearance is (j, coeff_j), where coeff_j is of the form (Fraction, is_imag).""" 
 663   
 664          if(isinstance(col_amps,list)): 
 665              if(col_amps and isinstance(col_amps[0],list)): 
 666                  color_amplitudes=col_amps 
 667              else: 
 668                  raise MadGraph5Error, "Incorrect col_amps argument passed to get_amp_to_jamp_map" 
 669          else: 
 670              raise MadGraph5Error, "Incorrect col_amps argument passed to get_amp_to_jamp_map" 
 671   
 672          # To store the result 
 673          res_list = [[] for i in range(n_amps)] 
 674          for i, coeff_list in enumerate(color_amplitudes): 
 675              for (coefficient, amp_number) in coeff_list: 
 676                  res_list[amp_number-1].append((i,self.cat_coeff(\ 
 677                      coefficient[0],coefficient[1],coefficient[2],coefficient[3]))) 
 678   
 679          return res_list 
680
 681      def get_color_matrix(self, matrix_element): 
682 """Return the color matrix definition lines. This color matrix is of size 683 NLOOPAMPSxNBORNAMPS and allows for squaring individually each Loop and Born 684 amplitude.""" 685 686 logger.info('Computing diagram color coefficients') 687 688 # The two lists have a list of tuples at element 'i' which correspond 689 # to all apparitions of loop amplitude number 'i' in the jampl number 'j' 690 # with coeff 'coeffj'. The format of each tuple describing an apparition 691 # is (j, coeffj). 692 ampl_to_jampl=self.get_amp_to_jamp_map(\ 693 matrix_element.get_loop_color_amplitudes(), 694 matrix_element.get_number_of_loop_amplitudes()) 695 if matrix_element.get('processes')[0].get('has_born'): 696 ampb_to_jampb=self.get_amp_to_jamp_map(\ 697 matrix_element.get_born_color_amplitudes(), 698 matrix_element.get_number_of_born_amplitudes()) 699 else: 700 ampb_to_jampb=ampl_to_jampl 701 # Below is the original color matrix multiplying the JAMPS 702 if matrix_element.get('color_matrix'): 703 ColorMatrixDenom = \ 704 matrix_element.get('color_matrix').get_line_denominators() 705 ColorMatrixNum = [ matrix_element.get('color_matrix').\ 706 get_line_numerators(index, denominator) for 707 (index, denominator) in enumerate(ColorMatrixDenom) ] 708 else: 709 ColorMatrixDenom= [1] 710 ColorMatrixNum = [[1]] 711 712 # Below is the final color matrix output 713 ColorMatrixNumOutput=[] 714 ColorMatrixDenomOutput=[] 715 716 # Now we construct the color factors between each born and loop amplitude 717 # by scanning their contributions to the different jamps. 718 start = time.time() 719 progress_bar = None 720 time_info = False 721 for i, jampl_list in enumerate(ampl_to_jampl): 722 # This can be pretty long for processes with many color flows. 723 # So, if necessary (i.e. for more than 15s), we tell the user the 724 # estimated time for the processing. 725 if i==5: 726 elapsed_time = time.time()-start 727 t = len(ampl_to_jampl)*(elapsed_time/5.0) 728 if t > 10.0: 729 time_info = True 730 logger.info('The color factors computation will take '+\ 731 ' about %s to run. '%str(datetime.timedelta(seconds=int(t)))+\ 732 'Started on %s.'%datetime.datetime.now().strftime(\ 733 "%d-%m-%Y %H:%M")) 734 if logger.getEffectiveLevel()<logging.WARNING: 735 widgets = ['Color computation:', pbar.Percentage(), ' ', 736 pbar.Bar(),' ', pbar.ETA(), ' '] 737 progress_bar = pbar.ProgressBar(widgets=widgets, 738 maxval=len(ampl_to_jampl), fd=sys.stdout) 739 740 if not progress_bar is None: 741 progress_bar.update(i+1) 742 # Flush to force the printout of the progress_bar to be updated 743 sys.stdout.flush() 744 745 line_num=[] 746 line_denom=[] 747 748 # Treat the special case where this specific amplitude contributes to no 749 # color flow at all. So it is zero because of color but not even due to 750 # an accidental cancellation among color flows, but simply because of its 751 # projection to each individual color flow is zero. In such case, the 752 # corresponding jampl_list is empty and all color coefficients must then 753 # be zero. This happens for example in the Higgs Effective Theory model 754 # for the bubble made of a 4-gluon vertex and the effective ggH vertex. 
755 if len(jampl_list)==0: 756 line_num=[0]*len(ampb_to_jampb) 757 line_denom=[1]*len(ampb_to_jampb) 758 ColorMatrixNumOutput.append(line_num) 759 ColorMatrixDenomOutput.append(line_denom) 760 continue 761 762 for jampb_list in ampb_to_jampb: 763 real_num=0 764 imag_num=0 765 common_denom=color_amp.ColorMatrix.lcmm(*[abs(ColorMatrixDenom[jampl]* 766 ampl_coeff[0].denominator*ampb_coeff[0].denominator) for 767 ((jampl, ampl_coeff),(jampb,ampb_coeff)) in 768 itertools.product(jampl_list,jampb_list)]) 769 for ((jampl, ampl_coeff),(jampb, ampb_coeff)) in \ 770 itertools.product(jampl_list,jampb_list): 771 # take the numerator and multiply by lcm/denominator 772 # as we will later divide by the lcm. 773 buff_num=ampl_coeff[0].numerator*\ 774 ampb_coeff[0].numerator*ColorMatrixNum[jampl][jampb]*\ 775 abs(common_denom)/(ampl_coeff[0].denominator*\ 776 ampb_coeff[0].denominator*ColorMatrixDenom[jampl]) 777 # Remember that we must take the complex conjugate of 778 # the born jamp color coefficient because we will compute 779 # the square with 2 Re(LoopAmp x BornAmp*) 780 if ampl_coeff[1] and ampb_coeff[1]: 781 real_num=real_num+buff_num 782 elif not ampl_coeff[1] and not ampb_coeff[1]: 783 real_num=real_num+buff_num 784 elif not ampl_coeff[1] and ampb_coeff[1]: 785 imag_num=imag_num-buff_num 786 else: 787 imag_num=imag_num+buff_num 788 assert not (real_num!=0 and imag_num!=0), "MadGraph5_aMC@NLO found a "+\ 789 "color matrix element which has both a real and imaginary part." 790 if imag_num!=0: 791 res=fractions.Fraction(imag_num,common_denom) 792 line_num.append(res.numerator) 793 # Negative denominator means imaginary color coef of the 794 # final color matrix 795 line_denom.append(res.denominator*-1) 796 else: 797 res=fractions.Fraction(real_num,common_denom) 798 line_num.append(res.numerator) 799 # Positive denominator means real color coef of the final color matrix 800 line_denom.append(res.denominator) 801 802 ColorMatrixNumOutput.append(line_num) 803 ColorMatrixDenomOutput.append(line_denom) 804 805 if time_info: 806 logger.info('Finished on %s.'%datetime.datetime.now().strftime(\ 807 "%d-%m-%Y %H:%M")) 808 if progress_bar!=None: 809 progress_bar.finish() 810 811 return (ColorMatrixNumOutput,ColorMatrixDenomOutput)
812
 813      def get_context(self,matrix_element): 
814 """ Returns the contextual variables which need to be set when 815 pre-processing the template files.""" 816 817 # The nSquaredSO entry of the general replace dictionary should have 818 # been set in write_loopmatrix prior to the first call to this function 819 # However, for cases where the TIRCaching contextual variable is 820 # irrelevant (like in the default output), this might not be the case 821 # so we set it to 1. 822 try: 823 n_squared_split_orders = matrix_element.rep_dict['nSquaredSO'] 824 except (KeyError, AttributeError): 825 n_squared_split_orders = 1 826 827 LoopInduced = not matrix_element.get('processes')[0].get('has_born') 828 self.has_loop_induced = max(LoopInduced, self.has_loop_induced) 829 # Force the computation of loop color flows for loop_induced processes 830 ComputeColorFlows = self.compute_color_flows or LoopInduced 831 # The variable AmplitudeReduction is just to make the contextual 832 # conditions more readable in the include files. 833 AmplitudeReduction = LoopInduced or ComputeColorFlows 834 # Even when not reducing at the amplitude level, the TIR caching 835 # is useful when there is more than one squared split order config. 836 TIRCaching = AmplitudeReduction or n_squared_split_orders>1 837 MadEventOutput = False 838 return {'LoopInduced': LoopInduced, 839 'ComputeColorFlows': ComputeColorFlows, 840 'AmplitudeReduction': AmplitudeReduction, 841 'TIRCaching': TIRCaching, 842 'MadEventOutput': MadEventOutput}
 843   
 844   
 845      #=========================================================================== 
 846      # generate_subprocess_directory 
 847      #=========================================================================== 
 848      def generate_loop_subprocess(self, matrix_element, fortran_model, 
 849          group_number = None, proc_id = None, config_map=None, unique_id=None): 
850 """Generate the Pxxxxx directory for a loop subprocess in MG4 standalone, 851 including the necessary loop_matrix.f, born_matrix.f and include files. 852 Notice that this is too different from generate_subprocess_directory 853 so that there is no point reusing this mother function. 854 The 'group_number' and 'proc_id' options are only used for the LoopInduced 855 MadEvent output and only to specify the ME_identifier and the P* 856 SubProcess directory name.""" 857 858 cwd = os.getcwd() 859 proc_dir_name = self.get_SubProc_folder_name( 860 matrix_element.get('processes')[0],group_number,proc_id) 861 dirpath = os.path.join(self.dir_path, 'SubProcesses', proc_dir_name) 862 863 try: 864 os.mkdir(dirpath) 865 except os.error as error: 866 logger.warning(error.strerror + " " + dirpath) 867 868 try: 869 os.chdir(dirpath) 870 except os.error: 871 logger.error('Could not cd to directory %s' % dirpath) 872 return 0 873 874 logger.info('Creating files in directory %s' % dirpath) 875 876 if unique_id is None: 877 raise MadGraph5Error, 'A unique id must be provided to the function'+\ 878 'generate_loop_subprocess of LoopProcessExporterFortranSA.' 879 # Create an include with the unique consecutive ID assigned 880 open('unique_id.inc','w').write( 881 """ integer UNIQUE_ID 882 parameter(UNIQUE_ID=%d)"""%unique_id) 883 884 # Extract number of external particles 885 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 886 887 calls=self.write_loop_matrix_element_v4(None,matrix_element, 888 fortran_model, group_number = group_number, 889 proc_id = proc_id, config_map = config_map) 890 891 # We assume here that all processes must share the same property of 892 # having a born or not, which must be true anyway since these are two 893 # definite different classes of processes which can never be treated on 894 # the same footing. 895 if matrix_element.get('processes')[0].get('has_born'): 896 filename = 'born_matrix.f' 897 calls = self.write_bornmatrix( 898 writers.FortranWriter(filename), 899 matrix_element, 900 fortran_model) 901 902 filename = 'pmass.inc' 903 self.write_pmass_file(writers.FortranWriter(filename), 904 matrix_element) 905 906 filename = 'ngraphs.inc' 907 self.write_ngraphs_file(writers.FortranWriter(filename), 908 len(matrix_element.get_all_amplitudes())) 909 910 # Do not draw the loop diagrams if they are too many. 
911 # The user can always decide to do it manually, if really needed 912 loop_diags = [loop_diag for loop_diag in\ 913 matrix_element.get('base_amplitude').get('loop_diagrams')\ 914 if isinstance(loop_diag,LoopDiagram) and loop_diag.get('type') > 0] 915 if len(loop_diags)>5000: 916 logger.info("There are more than 5000 loop diagrams."+\ 917 "Only the first 5000 are drawn.") 918 filename = "loop_matrix.ps" 919 plot = draw.MultiEpsDiagramDrawer(base_objects.DiagramList( 920 loop_diags[:5000]),filename, 921 model=matrix_element.get('processes')[0].get('model'),amplitude='') 922 logger.info("Drawing loop Feynman diagrams for " + \ 923 matrix_element.get('processes')[0].nice_string()) 924 plot.draw() 925 926 if matrix_element.get('processes')[0].get('has_born'): 927 filename = "born_matrix.ps" 928 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\ 929 get('born_diagrams'), 930 filename, 931 model=matrix_element.get('processes')[0].\ 932 get('model'), 933 amplitude='') 934 logger.info("Generating born Feynman diagrams for " + \ 935 matrix_element.get('processes')[0].nice_string(\ 936 print_weighted=False)) 937 plot.draw() 938 939 self.link_files_from_Subprocesses(self.get_SubProc_folder_name( 940 matrix_element.get('processes')[0],group_number,proc_id)) 941 942 # Return to original PWD 943 os.chdir(cwd) 944 945 if not calls: 946 calls = 0 947 return calls
948 969
 970      def generate_general_replace_dict(self,matrix_element, 
 971                                  group_number = None, proc_id = None): 
972 """Generates the entries for the general replacement dictionary used 973 for the different output codes for this exporter.The arguments 974 group_number and proc_id are just for the LoopInduced output with MadEvent.""" 975 976 dict={} 977 # A general process prefix which appears in front of all MadLooop 978 # subroutines and common block so that several processes can be compiled 979 # together into one library, as necessary to follow BLHA guidelines. 980 981 dict['proc_prefix'] = self.get_ME_identifier(matrix_element, 982 group_number = group_number, group_elem_number = proc_id) 983 984 if 'prefix' in self.cmd_options and self.cmd_options['prefix'] in ['int','proc']: 985 for proc in matrix_element.get('processes'): 986 ids = [l.get('id') for l in proc.get('legs_with_decays')] 987 self.prefix_info[tuple(ids)] = [dict['proc_prefix'], proc.get_tag()] 988 989 # The proc_id is used for MadEvent grouping, so none of our concern here 990 # and it is simply set to an empty string. 991 dict['proc_id'] = '' 992 # Extract version number and date from VERSION file 993 info_lines = self.get_mg5_info_lines() 994 dict['info_lines'] = info_lines 995 # Extract process info lines 996 process_lines = self.get_process_info_lines(matrix_element) 997 dict['process_lines'] = process_lines 998 # Extract number of external particles 999 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 1000 dict['nexternal'] = nexternal 1001 dict['nincoming'] = ninitial 1002 # Extract ncomb 1003 ncomb = matrix_element.get_helicity_combinations() 1004 dict['ncomb'] = ncomb 1005 # Extract nloopamps 1006 nloopamps = matrix_element.get_number_of_loop_amplitudes() 1007 dict['nloopamps'] = nloopamps 1008 # Extract nloopdiags 1009 nloopdiags = len(matrix_element.get('diagrams')) 1010 dict['nloopdiags'] = nloopdiags 1011 # Extract nctamps 1012 nctamps = matrix_element.get_number_of_CT_amplitudes() 1013 dict['nctamps'] = nctamps 1014 # Extract nwavefuncs 1015 nwavefuncs = matrix_element.get_number_of_external_wavefunctions() 1016 dict['nwavefuncs'] = nwavefuncs 1017 # Set format of the double precision 1018 dict['real_dp_format']='real*8' 1019 dict['real_mp_format']='real*16' 1020 # Set format of the complex 1021 dict['complex_dp_format']='complex*16' 1022 dict['complex_mp_format']='complex*32' 1023 # Set format of the masses 1024 dict['mass_dp_format'] = dict['complex_dp_format'] 1025 dict['mass_mp_format'] = dict['complex_mp_format'] 1026 # Fill in default values for the placeholders for the madevent 1027 # loop-induced output 1028 dict['nmultichannels'] = 0 1029 dict['nmultichannel_configs'] = 0 1030 dict['config_map_definition'] = '' 1031 dict['config_index_map_definition'] = '' 1032 # Color matrix size 1033 # For loop induced processes it is NLOOPAMPSxNLOOPAMPS and otherwise 1034 # it is NLOOPAMPSxNBORNAMPS 1035 # Also, how to access the number of Born squared order contributions 1036 1037 if matrix_element.get('processes')[0].get('has_born'): 1038 dict['color_matrix_size'] = 'nbornamps' 1039 dict['get_nsqso_born']=\ 1040 "include 'nsqso_born.inc'" 1041 else: 1042 dict['get_nsqso_born']="""INTEGER NSQSO_BORN 1043 PARAMETER (NSQSO_BORN=0) 1044 """ 1045 dict['color_matrix_size'] = 'nloopamps' 1046 1047 # These placeholders help to have as many common templates for the 1048 # output of the loop induced processes and those with a born 1049 # contribution. 
1050 if matrix_element.get('processes')[0].get('has_born'): 1051 # Extract nbornamps 1052 nbornamps = matrix_element.get_number_of_born_amplitudes() 1053 dict['nbornamps'] = nbornamps 1054 dict['ncomb_helas_objs'] = ',ncomb' 1055 dict['nbornamps_decl'] = \ 1056 """INTEGER NBORNAMPS 1057 PARAMETER (NBORNAMPS=%d)"""%nbornamps 1058 dict['nBornAmps'] = nbornamps 1059 1060 else: 1061 dict['ncomb_helas_objs'] = '' 1062 dict['dp_born_amps_decl'] = '' 1063 dict['dp_born_amps_decl_in_mp'] = '' 1064 dict['copy_mp_to_dp_born_amps'] = '' 1065 dict['mp_born_amps_decl'] = '' 1066 dict['nbornamps_decl'] = '' 1067 dict['nbornamps'] = 0 1068 dict['nBornAmps'] = 0 1069 1070 return dict
1071
1072      def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model, 
1073                     group_number = None, proc_id = None, config_map = None): 
1074 """ Writes loop_matrix.f, CT_interface.f, loop_num.f and 1075 mp_born_amps_and_wfs. 1076 The arguments group_number and proc_id are just for the LoopInduced 1077 output with MadEvent and only used in get_ME_identifier. 1078 """ 1079 1080 # Create the necessary files for the loop matrix element subroutine 1081 1082 if config_map: 1083 raise MadGraph5Error, 'The default loop output cannot be used with'+\ 1084 'MadEvent and cannot compute the AMP2 for multi-channeling.' 1085 1086 if not isinstance(fortran_model,\ 1087 helas_call_writers.FortranUFOHelasCallWriter): 1088 raise MadGraph5Error, 'The loop fortran output can only'+\ 1089 ' work with a UFO Fortran model' 1090 1091 LoopFortranModel = helas_call_writers.FortranUFOHelasCallWriter( 1092 argument=fortran_model.get('model'), 1093 hel_sum=matrix_element.get('processes')[0].get('has_born')) 1094 1095 # Compute the analytical information of the loop wavefunctions in the 1096 # loop helas matrix elements using the cached aloha model to reuse 1097 # as much as possible the aloha computations already performed for 1098 # writing out the aloha fortran subroutines. 1099 matrix_element.compute_all_analytic_information( 1100 self.get_aloha_model(matrix_element.get('processes')[0].get('model'))) 1101 1102 # Initialize a general replacement dictionary with entries common to 1103 # many files generated here. 1104 matrix_element.rep_dict = self.generate_general_replace_dict( 1105 matrix_element, group_number = group_number, proc_id = proc_id) 1106 1107 # Extract max number of loop couplings (specific to this output type) 1108 matrix_element.rep_dict['maxlcouplings']= \ 1109 matrix_element.find_max_loop_coupling() 1110 # The born amp declaration suited for also outputing the loop-induced 1111 # processes as well. 1112 if matrix_element.get('processes')[0].get('has_born'): 1113 matrix_element.rep_dict['dp_born_amps_decl_in_mp'] = \ 1114 matrix_element.rep_dict['complex_dp_format']+" DPAMP(NBORNAMPS,NCOMB)"+\ 1115 "\n common/%sAMPS/DPAMP"%matrix_element.rep_dict['proc_prefix'] 1116 matrix_element.rep_dict['dp_born_amps_decl'] = \ 1117 matrix_element.rep_dict['complex_dp_format']+" AMP(NBORNAMPS,NCOMB)"+\ 1118 "\n common/%sAMPS/AMP"%matrix_element.rep_dict['proc_prefix'] 1119 matrix_element.rep_dict['mp_born_amps_decl'] = \ 1120 matrix_element.rep_dict['complex_mp_format']+" AMP(NBORNAMPS,NCOMB)"+\ 1121 "\n common/%sMP_AMPS/AMP"%matrix_element.rep_dict['proc_prefix'] 1122 matrix_element.rep_dict['copy_mp_to_dp_born_amps'] = \ 1123 '\n'.join(['DO I=1,NBORNAMPS','DPAMP(I,H)=AMP(I,H)','ENDDO']) 1124 1125 if writer: 1126 raise MadGraph5Error, 'Matrix output mode no longer supported.' 
1127 1128 filename = 'loop_matrix.f' 1129 calls = self.write_loopmatrix(writers.FortranWriter(filename), 1130 matrix_element, 1131 LoopFortranModel) 1132 1133 # Write out the proc_prefix in a file, this is quite handy 1134 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w') 1135 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix']) 1136 proc_prefix_writer.close() 1137 1138 filename = 'check_sa.f' 1139 self.write_check_sa(writers.FortranWriter(filename),matrix_element) 1140 1141 filename = 'CT_interface.f' 1142 self.write_CT_interface(writers.FortranWriter(filename),\ 1143 matrix_element) 1144 1145 1146 1147 filename = 'improve_ps.f' 1148 calls = self.write_improve_ps(writers.FortranWriter(filename), 1149 matrix_element) 1150 1151 filename = 'loop_num.f' 1152 self.write_loop_num(writers.FortranWriter(filename),\ 1153 matrix_element,LoopFortranModel) 1154 1155 filename = 'mp_born_amps_and_wfs.f' 1156 self.write_born_amps_and_wfs(writers.FortranWriter(filename),\ 1157 matrix_element,LoopFortranModel) 1158 1159 # Extract number of external particles 1160 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 1161 filename = 'nexternal.inc' 1162 self.write_nexternal_file(writers.FortranWriter(filename), 1163 nexternal, ninitial) 1164 1165 filename = 'process_info.inc' 1166 self.write_process_info_file(writers.FortranWriter(filename), 1167 matrix_element) 1168 return calls
1169
1170      def write_process_info_file(self, writer, matrix_element): 
1171 """A small structural function to write the include file specifying some 1172 process characteristics.""" 1173 1174 model = matrix_element.get('processes')[0].get('model') 1175 process_info = {} 1176 # The maximum spin of any particle connected (or directly running in) 1177 # any loop of this matrix element. This is important because there is 1178 # some limitation in the stability tests that can be performed when this 1179 # maximum spin is above 3 (vectors). Also CutTools has limitations in 1180 # this regard. 1181 process_info['max_spin_connected_to_loop']=\ 1182 matrix_element.get_max_spin_connected_to_loop() 1183 1184 process_info['max_spin_external_particle']= max( 1185 model.get_particle(l.get('id')).get('spin') for l in 1186 matrix_element.get('processes')[0].get('legs')) 1187 1188 proc_include = \ 1189 """ 1190 INTEGER MAX_SPIN_CONNECTED_TO_LOOP 1191 PARAMETER(MAX_SPIN_CONNECTED_TO_LOOP=%(max_spin_connected_to_loop)d) 1192 INTEGER MAX_SPIN_EXTERNAL_PARTICLE 1193 PARAMETER(MAX_SPIN_EXTERNAL_PARTICLE=%(max_spin_external_particle)d) 1194 """%process_info 1195 1196 writer.writelines(proc_include)
1197
1198      def generate_subprocess_directory(self, matrix_element, fortran_model): 
1199          """ Overloads the default name of this function so that the correct 
1200          function is used when called from the command interface """ 
1201   
1202          self.unique_id +=1 
1203          return self.generate_loop_subprocess(matrix_element,fortran_model, 
1204                                                    unique_id=self.unique_id) 
1205
1206      def write_check_sa(self, writer, matrix_element): 
1207 """Writes out the steering code check_sa. In the optimized output mode, 1208 All the necessary entries in the replace_dictionary have already been 1209 set in write_loopmatrix because it is only there that one has access to 1210 the information about split orders.""" 1211 replace_dict = copy.copy(matrix_element.rep_dict) 1212 for key in ['print_so_born_results','print_so_loop_results', 1213 'write_so_born_results','write_so_loop_results','set_coupling_target']: 1214 if key not in replace_dict.keys(): 1215 replace_dict[key]='' 1216 1217 if matrix_element.get('processes')[0].get('has_born'): 1218 file = open(os.path.join(self.template_dir,'check_sa.inc')).read() 1219 else: 1220 file = open(os.path.join(self.template_dir,\ 1221 'check_sa_loop_induced.inc')).read() 1222 file=file%replace_dict 1223 writer.writelines(file) 1224 1225 # We can always write the f2py wrapper if present (in loop optimized mode, it is) 1226 if not os.path.isfile(pjoin(self.template_dir,'check_py.f.inc')): 1227 return 1228 1229 file = open(os.path.join(self.template_dir,\ 1230 'check_py.f.inc')).read() 1231 1232 if 'prefix' in self.cmd_options and self.cmd_options['prefix'] in ['int','proc']: 1233 replace_dict['prefix_routine'] = replace_dict['proc_prefix'] 1234 else: 1235 replace_dict['prefix_routine'] = '' 1236 file=file%replace_dict 1237 new_path = writer.name.replace('check_sa.f', 'f2py_wrapper.f') 1238 new_writer = writer.__class__(new_path, 'w') 1239 new_writer.writelines(file) 1240 1241 file = open(os.path.join(self.template_dir,\ 1242 'check_sa.py.inc')).read() 1243 # For now just put in an empty PS point but in the future, maybe generate 1244 # a valid one already here by default 1245 curr_proc = matrix_element.get('processes')[0] 1246 random_PSpoint_python_formatted = \ 1247 """# Specify your chosen PS point below. If you leave it filled with None, then the script will attempt to read it from the file PS.input. 1248 p= [[None,]*4]*%d"""%len(curr_proc.get('legs')) 1249 1250 process_definition_string = curr_proc.nice_string().replace('Process:','') 1251 file=file.format(random_PSpoint_python_formatted,process_definition_string, 1252 replace_dict['proc_prefix'].lower()) 1253 new_path = writer.name.replace('check_sa.f', 'check_sa.py') 1254 new_writer = open(new_path, 'w') 1255 new_writer.writelines(file) 1256 # Make it executable 1257 os.chmod(new_path, os.stat(new_path).st_mode | stat.S_IEXEC)
1258
1259      def write_improve_ps(self, writer, matrix_element): 
1260 """ Write out the improve_ps subroutines which modify the PS point 1261 given in input and slightly deform it to achieve exact onshellness on 1262 all external particles as well as perfect energy-momentum conservation""" 1263 replace_dict = copy.copy(matrix_element.rep_dict) 1264 1265 (nexternal,ninitial)=matrix_element.get_nexternal_ninitial() 1266 replace_dict['ninitial']=ninitial 1267 mass_list=matrix_element.get_external_masses()[:-2] 1268 mp_variable_prefix = check_param_card.ParamCard.mp_prefix 1269 1270 # Write the quadruple precision version of this routine only. 1271 replace_dict['real_format']=replace_dict['real_mp_format'] 1272 replace_dict['mp_prefix']='MP_' 1273 replace_dict['exp_letter']='e' 1274 replace_dict['mp_specifier']='_16' 1275 replace_dict['coupl_inc_name']='mp_coupl.inc' 1276 replace_dict['masses_def']='\n'.join(['MASSES(%(i)d)=%(prefix)s%(m)s'\ 1277 %{'i':i+1,'m':m, 'prefix':mp_variable_prefix} for \ 1278 i, m in enumerate(mass_list)]) 1279 file_mp = open(os.path.join(self.template_dir,'improve_ps.inc')).read() 1280 file_mp=file_mp%replace_dict 1281 # 1282 writer.writelines(file_mp)
1283
1284      def write_loop_num(self, writer, matrix_element,fortran_model): 
1285 """ Create the file containing the core subroutine called by CutTools 1286 which contains the Helas calls building the loop""" 1287 1288 if not matrix_element.get('processes') or \ 1289 not matrix_element.get('diagrams'): 1290 return 0 1291 1292 # Set lowercase/uppercase Fortran code 1293 writers.FortranWriter.downcase = False 1294 1295 file = open(os.path.join(self.template_dir,'loop_num.inc')).read() 1296 1297 replace_dict = copy.copy(matrix_element.rep_dict) 1298 1299 loop_helas_calls=fortran_model.get_loop_amplitude_helas_calls(matrix_element) 1300 replace_dict['maxlcouplings']=matrix_element.find_max_loop_coupling() 1301 replace_dict['loop_helas_calls'] = "\n".join(loop_helas_calls) 1302 1303 # The squaring is only necessary for the processes with born where the 1304 # sum over helicities is done before sending the numerator to CT. 1305 dp_squaring_lines=['DO I=1,NBORNAMPS', 1306 'CFTOT=DCMPLX(CF_N(AMPLNUM,I)/DBLE(ABS(CF_D(AMPLNUM,I))),0.0d0)', 1307 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1', 1308 'RES=RES+CFTOT*BUFF*DCONJG(AMP(I,H))','ENDDO'] 1309 mp_squaring_lines=['DO I=1,NBORNAMPS', 1310 'CFTOT=CMPLX(CF_N(AMPLNUM,I)/(1.0E0_16*ABS(CF_D(AMPLNUM,I))),0.0E0_16,KIND=16)', 1311 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1', 1312 'QPRES=QPRES+CFTOT*BUFF*CONJG(AMP(I,H))','ENDDO'] 1313 if matrix_element.get('processes')[0].get('has_born'): 1314 replace_dict['dp_squaring']='\n'.join(dp_squaring_lines) 1315 replace_dict['mp_squaring']='\n'.join(mp_squaring_lines) 1316 else: 1317 replace_dict['dp_squaring']='RES=BUFF' 1318 replace_dict['mp_squaring']='QPRES=BUFF' 1319 1320 # Prepend MP_ to all helas calls. 1321 self.turn_to_mp_calls(loop_helas_calls) 1322 replace_dict['mp_loop_helas_calls'] = "\n".join(loop_helas_calls) 1323 1324 file=file%replace_dict 1325 1326 if writer: 1327 writer.writelines(file) 1328 else: 1329 return file
1330
1331      def write_CT_interface(self, writer, matrix_element, optimized_output=False): 
1332 """ Create the file CT_interface.f which contains the subroutine defining 1333 the loop HELAS-like calls along with the general interfacing subroutine. 1334 It is used to interface against any OPP tool, including Samurai and Ninja.""" 1335 1336 files=[] 1337 1338 # First write CT_interface which interfaces MG5 with CutTools. 1339 replace_dict=copy.copy(matrix_element.rep_dict) 1340 1341 # We finalize CT result differently wether we used the built-in 1342 # squaring against the born. 1343 if matrix_element.get('processes')[0].get('has_born'): 1344 replace_dict['finalize_CT']='\n'.join([\ 1345 'RES(%d)=NORMALIZATION*2.0d0*DBLE(RES(%d))'%(i,i) for i in range(1,4)]) 1346 else: 1347 replace_dict['finalize_CT']='\n'.join([\ 1348 'RES(%d)=NORMALIZATION*RES(%d)'%(i,i) for i in range(1,4)]) 1349 1350 file = open(os.path.join(self.template_dir,'CT_interface.inc')).read() 1351 1352 file = file % replace_dict 1353 files.append(file) 1354 1355 # Now collect the different kind of subroutines needed for the 1356 # loop HELAS-like calls. 1357 HelasLoopAmpsCallKeys=matrix_element.get_used_helas_loop_amps() 1358 1359 for callkey in HelasLoopAmpsCallKeys: 1360 replace_dict=copy.copy(matrix_element.rep_dict) 1361 # Add to this dictionary all other attribute common to all 1362 # HELAS-like loop subroutines. 1363 if matrix_element.get('processes')[0].get('has_born'): 1364 replace_dict['validh_or_nothing']=',validh' 1365 else: 1366 replace_dict['validh_or_nothing']='' 1367 # In the optimized output, the number of couplings in the loop is 1368 # not specified so we only treat it here if necessary: 1369 if len(callkey)>2: 1370 replace_dict['ncplsargs']=callkey[2] 1371 cplsargs="".join(["C%d,MP_C%d, "%(i,i) for i in range(1,callkey[2]+1)]) 1372 replace_dict['cplsargs']=cplsargs 1373 cplsdecl="".join(["C%d, "%i for i in range(1,callkey[2]+1)])[:-2] 1374 replace_dict['cplsdecl']=cplsdecl 1375 mp_cplsdecl="".join(["MP_C%d, "%i for i in range(1,callkey[2]+1)])[:-2] 1376 replace_dict['mp_cplsdecl']=mp_cplsdecl 1377 cplset="\n".join(["\n".join(["LC(%d)=C%d"%(i,i),\ 1378 "MP_LC(%d)=MP_C%d"%(i,i)])\ 1379 for i in range(1,callkey[2]+1)]) 1380 replace_dict['cplset']=cplset 1381 1382 replace_dict['nloopline']=callkey[0] 1383 wfsargs="".join(["W%d, "%i for i in range(1,callkey[1]+1)]) 1384 replace_dict['wfsargs']=wfsargs 1385 # We don't pass the multiple precision mass in the optimized_output 1386 if not optimized_output: 1387 margs="".join(["M%d,MP_M%d, "%(i,i) for i in range(1,callkey[0]+1)]) 1388 else: 1389 margs="".join(["M%d, "%i for i in range(1,callkey[0]+1)]) 1390 replace_dict['margs']=margs 1391 wfsargsdecl="".join([("W%d, "%i) for i in range(1,callkey[1]+1)])[:-2] 1392 replace_dict['wfsargsdecl']=wfsargsdecl 1393 margsdecl="".join(["M%d, "%i for i in range(1,callkey[0]+1)])[:-2] 1394 replace_dict['margsdecl']=margsdecl 1395 mp_margsdecl="".join(["MP_M%d, "%i for i in range(1,callkey[0]+1)])[:-2] 1396 replace_dict['mp_margsdecl']=mp_margsdecl 1397 weset="\n".join([("WE("+str(i)+")=W"+str(i)) for \ 1398 i in range(1,callkey[1]+1)]) 1399 replace_dict['weset']=weset 1400 weset="\n".join([("WE(%d)=W%d"%(i,i)) for i in range(1,callkey[1]+1)]) 1401 replace_dict['weset']=weset 1402 msetlines=["M2L(1)=M%d**2"%(callkey[0]),] 1403 mset="\n".join(msetlines+["M2L(%d)=M%d**2"%(i,i-1) for \ 1404 i in range(2,callkey[0]+1)]) 1405 replace_dict['mset']=mset 1406 mset2lines=["ML(1)=M%d"%(callkey[0]),"ML(2)=M%d"%(callkey[0]), 1407 "MP_ML(1)=MP_M%d"%(callkey[0]),"MP_ML(2)=MP_M%d"%(callkey[0])] 1408 
mset2="\n".join(mset2lines+["\n".join(["ML(%d)=M%d"%(i,i-2), 1409 "MP_ML(%d)=MP_M%d"%(i,i-2)]) for \ 1410 i in range(3,callkey[0]+3)]) 1411 replace_dict['mset2']=mset2 1412 replace_dict['nwfsargs'] = callkey[1] 1413 if callkey[0]==callkey[1]: 1414 replace_dict['nwfsargs_header'] = "" 1415 replace_dict['pairingargs']="" 1416 replace_dict['pairingdecl']="" 1417 pairingset="""DO I=1,NLOOPLINE 1418 PAIRING(I)=1 1419 ENDDO 1420 """ 1421 replace_dict['pairingset']=pairingset 1422 else: 1423 replace_dict['nwfsargs_header'] = '_%d'%callkey[1] 1424 pairingargs="".join([("P"+str(i)+", ") for i in \ 1425 range(1,callkey[0]+1)]) 1426 replace_dict['pairingargs']=pairingargs 1427 pairingdecl="integer "+"".join([("P"+str(i)+", ") for i in \ 1428 range(1,callkey[0]+1)])[:-2] 1429 replace_dict['pairingdecl']=pairingdecl 1430 pairingset="\n".join([("PAIRING("+str(i)+")=P"+str(i)) for \ 1431 i in range(1,callkey[0]+1)]) 1432 replace_dict['pairingset']=pairingset 1433 1434 file = open(os.path.join(self.template_dir,\ 1435 'helas_loop_amplitude.inc')).read() 1436 file = file % replace_dict 1437 files.append(file) 1438 1439 file="\n".join(files) 1440 1441 if writer: 1442 writer.writelines(file,context=self.get_context(matrix_element)) 1443 else: 1444 return file
1445   
1446      # Helper function to split HELAS CALLS in dedicated subroutines placed 
1447      # in different files. 
1448      def split_HELASCALLS(self, writer, replace_dict, template_name, masterfile, \ 
1449                           helas_calls, entry_name, bunch_name,n_helas=2000, 
1450                           required_so_broadcaster = 'LOOP_REQ_SO_DONE', 
1451                           continue_label = 1000, momenta_array_name='P', 
1452                           context={}): 
1453 """ Finish the code generation with splitting. 1454 Split the helas calls in the argument helas_calls into bunches of 1455 size n_helas and place them in dedicated subroutine with name 1456 <bunch_name>_i. Also setup the corresponding calls to these subroutine 1457 in the replace_dict dictionary under the entry entry_name. 1458 The context specified will be forwarded to the the fileWriter.""" 1459 helascalls_replace_dict=copy.copy(replace_dict) 1460 helascalls_replace_dict['bunch_name']=bunch_name 1461 helascalls_files=[] 1462 for i, k in enumerate(range(0, len(helas_calls), n_helas)): 1463 helascalls_replace_dict['bunch_number']=i+1 1464 helascalls_replace_dict['helas_calls']=\ 1465 '\n'.join(helas_calls[k:k + n_helas]) 1466 helascalls_replace_dict['required_so_broadcaster']=\ 1467 required_so_broadcaster 1468 helascalls_replace_dict['continue_label']=continue_label 1469 new_helascalls_file = open(os.path.join(self.template_dir,\ 1470 template_name)).read() 1471 new_helascalls_file = new_helascalls_file % helascalls_replace_dict 1472 helascalls_files.append(new_helascalls_file) 1473 # Setup the call to these HELASCALLS subroutines in loop_matrix.f 1474 helascalls_calls = [ "CALL %s%s_%d(%s,NHEL,H,IC)"%\ 1475 (replace_dict['proc_prefix'] ,bunch_name,a+1,momenta_array_name) \ 1476 for a in range(len(helascalls_files))] 1477 replace_dict[entry_name]='\n'.join(helascalls_calls) 1478 if writer: 1479 for i, helascalls_file in enumerate(helascalls_files): 1480 filename = '%s_%d.f'%(bunch_name,i+1) 1481 writers.FortranWriter(filename).writelines(helascalls_file, 1482 context=context) 1483 else: 1484 masterfile='\n'.join([masterfile,]+helascalls_files) 1485 1486 return masterfile
1487
1488 - def write_loopmatrix(self, writer, matrix_element, fortran_model, 1489 noSplit=False):
1490 """Create the loop_matrix.f file.""" 1491 1492 if not matrix_element.get('processes') or \ 1493 not matrix_element.get('diagrams'): 1494 return 0 1495 1496 # Set lowercase/uppercase Fortran code 1497 1498 writers.FortranWriter.downcase = False 1499 1500 replace_dict = copy.copy(matrix_element.rep_dict) 1501 1502 # Extract overall denominator 1503 # Averaging initial state color, spin, and identical FS particles 1504 den_factor_line = self.get_den_factor_line(matrix_element) 1505 replace_dict['den_factor_line'] = den_factor_line 1506 # When the user asks for the polarized matrix element we must 1507 # multiply back by the helicity averaging factor 1508 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 1509 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\ 1510 matrix_element.get_beams_hel_avg_factor() 1511 1512 # These entries are specific for the output for loop-induced processes 1513 # Also sets here the details of the squaring of the loop ampltiudes 1514 # with the born or the loop ones. 1515 if not matrix_element.get('processes')[0].get('has_born'): 1516 replace_dict['compute_born']=\ 1517 """C There is of course no born for loop induced processes 1518 ANS(0)=0.0d0 1519 """ 1520 replace_dict['set_reference']='\n'.join([ 1521 'C For loop-induced, the reference for comparison is set later'+\ 1522 ' from the total contribution of the previous PS point considered.', 1523 'C But you can edit here the value to be used for the first PS point.', 1524 'if (NPSPOINTS.eq.0) then','ref=1.0d-50','else', 1525 'ref=nextRef/DBLE(NPSPOINTS)','endif']) 1526 replace_dict['loop_induced_setup'] = '\n'.join([ 1527 'HELPICKED_BU=HELPICKED','HELPICKED=H','MP_DONE=.FALSE.', 1528 'IF(SKIPLOOPEVAL) THEN','GOTO 1227','ENDIF']) 1529 replace_dict['loop_induced_finalize'] = \ 1530 ("""DO I=NCTAMPS+1,NLOOPAMPS 1531 IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN 1532 WRITE(*,*) '##W03 WARNING Contribution ',I 1533 WRITE(*,*) ' is unstable for helicity ',H 1534 ENDIF 1535 C IF(.NOT.%(proc_prefix)sISZERO(ABS(AMPL(2,I))+ABS(AMPL(3,I)),REF,-1,H)) THEN 1536 C WRITE(*,*) '##W04 WARNING Contribution ',I,' for helicity ',H,' has a contribution to the poles.' 1537 C WRITE(*,*) 'Finite contribution = ',AMPL(1,I) 1538 C WRITE(*,*) 'single pole contribution = ',AMPL(2,I) 1539 C WRITE(*,*) 'double pole contribution = ',AMPL(3,I) 1540 C ENDIF 1541 ENDDO 1542 1227 CONTINUE 1543 HELPICKED=HELPICKED_BU""")%replace_dict 1544 replace_dict['loop_helas_calls']="" 1545 replace_dict['nctamps_or_nloopamps']='nloopamps' 1546 replace_dict['nbornamps_or_nloopamps']='nloopamps' 1547 replace_dict['squaring']=\ 1548 """ANS(1)=ANS(1)+DBLE(CFTOT*AMPL(1,I)*DCONJG(AMPL(1,J))) 1549 IF (J.EQ.1) THEN 1550 ANS(2)=ANS(2)+DBLE(CFTOT*AMPL(2,I))+DIMAG(CFTOT*AMPL(2,I)) 1551 ANS(3)=ANS(3)+DBLE(CFTOT*AMPL(3,I))+DIMAG(CFTOT*AMPL(3,I)) 1552 ENDIF""" 1553 else: 1554 replace_dict['compute_born']=\ 1555 """C Compute the born, for a specific helicity if asked so. 
1556 call %(proc_prefix)ssmatrixhel(P_USER,USERHEL,ANS(0)) 1557 """%matrix_element.rep_dict 1558 replace_dict['set_reference']=\ 1559 """C We chose to use the born evaluation for the reference 1560 call %(proc_prefix)ssmatrix(p,ref)"""%matrix_element.rep_dict 1561 replace_dict['loop_induced_helas_calls'] = "" 1562 replace_dict['loop_induced_finalize'] = "" 1563 replace_dict['loop_induced_setup'] = "" 1564 replace_dict['nctamps_or_nloopamps']='nctamps' 1565 replace_dict['nbornamps_or_nloopamps']='nbornamps' 1566 replace_dict['squaring']='\n'.join(['DO K=1,3', 1567 'ANS(K)=ANS(K)+2.0d0*DBLE(CFTOT*AMPL(K,I)*DCONJG(AMP(J,H)))', 1568 'ENDDO']) 1569 1570 # Write a dummy nsquaredSO.inc which is used in the default 1571 # loop_matrix.f code (even though it does not support split orders evals) 1572 # just to comply with the syntax expected from the external code using MadLoop. 1573 writers.FortranWriter('nsquaredSO.inc').writelines( 1574 """INTEGER NSQUAREDSO 1575 PARAMETER (NSQUAREDSO=0)""") 1576 1577 # Actualize results from the loops computed. Only necessary for 1578 # processes with a born. 1579 actualize_ans=[] 1580 if matrix_element.get('processes')[0].get('has_born'): 1581 actualize_ans.append("DO I=NCTAMPS+1,NLOOPAMPS") 1582 actualize_ans.extend("ANS(%d)=ANS(%d)+AMPL(%d,I)"%(i,i,i) for i \ 1583 in range(1,4)) 1584 actualize_ans.append(\ 1585 "IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN") 1586 actualize_ans.append(\ 1587 "WRITE(*,*) '##W03 WARNING Contribution ',I,' is unstable.'") 1588 actualize_ans.extend(["ENDIF","ENDDO"]) 1589 replace_dict['actualize_ans']='\n'.join(actualize_ans) 1590 else: 1591 replace_dict['actualize_ans']=\ 1592 ("""C We add five powers to the reference value to loosen a bit the vanishing pole check. 1593 C IF(.NOT.(CHECKPHASE.OR.(.NOT.HELDOUBLECHECKED)).AND..NOT.%(proc_prefix)sISZERO(ABS(ANS(2))+ABS(ANS(3)),ABS(ANS(1))*(10.0d0**5),-1,H)) THEN 1594 C WRITE(*,*) '##W05 WARNING Found a PS point with a contribution to the single pole.' 
1595 C WRITE(*,*) 'Finite contribution = ',ANS(1) 1596 C WRITE(*,*) 'single pole contribution = ',ANS(2) 1597 C WRITE(*,*) 'double pole contribution = ',ANS(3) 1598 C ENDIF""")%replace_dict 1599 1600 # Write out the color matrix 1601 (CMNum,CMDenom) = self.get_color_matrix(matrix_element) 1602 CMWriter=open(pjoin('..','MadLoop5_resources', 1603 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w') 1604 for ColorLine in CMNum: 1605 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 1606 CMWriter.close() 1607 CMWriter=open(pjoin('..','MadLoop5_resources', 1608 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w') 1609 for ColorLine in CMDenom: 1610 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 1611 CMWriter.close() 1612 1613 # Write out the helicity configurations 1614 HelConfigs=matrix_element.get_helicity_matrix() 1615 HelConfigWriter=open(pjoin('..','MadLoop5_resources', 1616 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w') 1617 for HelConfig in HelConfigs: 1618 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n') 1619 HelConfigWriter.close() 1620 1621 # Extract helas calls 1622 loop_amp_helas_calls = fortran_model.get_loop_amp_helas_calls(\ 1623 matrix_element) 1624 # The proc_prefix must be replaced 1625 loop_amp_helas_calls = [lc % matrix_element.rep_dict 1626 for lc in loop_amp_helas_calls] 1627 1628 born_ct_helas_calls, UVCT_helas_calls = \ 1629 fortran_model.get_born_ct_helas_calls(matrix_element) 1630 # In the default output, we do not need to separate these two kind of 1631 # contributions 1632 born_ct_helas_calls = born_ct_helas_calls + UVCT_helas_calls 1633 file = open(os.path.join(self.template_dir,\ 1634 1635 'loop_matrix_standalone.inc')).read() 1636 1637 if matrix_element.get('processes')[0].get('has_born'): 1638 toBeRepaced='loop_helas_calls' 1639 else: 1640 toBeRepaced='loop_induced_helas_calls' 1641 1642 # Decide here wether we need to split the loop_matrix.f file or not. 1643 if (not noSplit and (len(matrix_element.get_all_amplitudes())>1000)): 1644 file=self.split_HELASCALLS(writer,replace_dict,\ 1645 'helas_calls_split.inc',file,born_ct_helas_calls,\ 1646 'born_ct_helas_calls','helas_calls_ampb') 1647 file=self.split_HELASCALLS(writer,replace_dict,\ 1648 'helas_calls_split.inc',file,loop_amp_helas_calls,\ 1649 toBeRepaced,'helas_calls_ampl') 1650 else: 1651 replace_dict['born_ct_helas_calls']='\n'.join(born_ct_helas_calls) 1652 replace_dict[toBeRepaced]='\n'.join(loop_amp_helas_calls) 1653 1654 file = file % replace_dict 1655 1656 loop_calls_finder = re.compile(r'^\s*CALL\S*LOOP\S*') 1657 n_loop_calls = len(filter(lambda call: 1658 not loop_calls_finder.match(call) is None, loop_amp_helas_calls)) 1659 if writer: 1660 # Write the file 1661 writer.writelines(file) 1662 return n_loop_calls 1663 else: 1664 # Return it to be written along with the others 1665 return n_loop_calls, file
1666
1667 - def write_bornmatrix(self, writer, matrix_element, fortran_model):
1668          """Create the born_matrix.f file for the born process as for a standard 
1669          tree-level computation.""" 
1670   
1671          if not matrix_element.get('processes') or \ 
1672                 not matrix_element.get('diagrams'): 
1673              return 0 
1674   
1675          if not isinstance(writer, writers.FortranWriter): 
1676              raise writers.FortranWriter.FortranWriterError(\ 
1677                  "writer not FortranWriter") 
1678   
1679          # For now, we can use the exact same treatment as for tree-level 
1680          # computations by redefining here a regular HelasMatrixElement for the 
1681          # born process. 
1682          # It is important to make a deepcopy, as we don't want any possible 
1683          # treatment on the objects of the bornME to have side effects on 
1684          # the content of the LoopHelasMatrixElement object. 
1685          bornME = helas_objects.HelasMatrixElement() 
1686          for prop in bornME.keys(): 
1687              bornME.set(prop,copy.deepcopy(matrix_element.get(prop))) 
1688          bornME.set('base_amplitude',None,force=True) 
1689          bornME.set('diagrams',copy.deepcopy(\ 
1690                                          matrix_element.get_born_diagrams())) 
1691          bornME.set('color_basis',copy.deepcopy(\ 
1692                                          matrix_element.get('born_color_basis'))) 
1693          bornME.set('color_matrix',copy.deepcopy(\ 
1694                             color_amp.ColorMatrix(bornME.get('color_basis')))) 
1695          # This decides whether to reuse old wavefunctions to store new 
1696          # ones (provided they are not used further in the code). 
1697          bornME.optimization = True 
1698          return super(LoopProcessExporterFortranSA,self).write_matrix_element_v4( 
1699                        writer, bornME, fortran_model, 
1700                        proc_prefix=matrix_element.rep_dict['proc_prefix'])
1701
1702 - def write_born_amps_and_wfs(self, writer, matrix_element, fortran_model, 1703 noSplit=False):
1704 """ Writes out the code for the subroutine MP_BORN_AMPS_AND_WFS which 1705 computes just the external wavefunction and born amplitudes in 1706 multiple precision. """ 1707 1708 if not matrix_element.get('processes') or \ 1709 not matrix_element.get('diagrams'): 1710 return 0 1711 1712 replace_dict = copy.copy(matrix_element.rep_dict) 1713 1714 # For the wavefunction copy, check what suffix is needed for the W array 1715 if matrix_element.get('processes')[0].get('has_born'): 1716 replace_dict['h_w_suffix']=',H' 1717 else: 1718 replace_dict['h_w_suffix']='' 1719 1720 # Extract helas calls 1721 born_amps_and_wfs_calls , uvct_amp_calls = \ 1722 fortran_model.get_born_ct_helas_calls(matrix_element, include_CT=True) 1723 # In the default output, these two kind of contributions do not need to 1724 # be differentiated 1725 born_amps_and_wfs_calls = born_amps_and_wfs_calls + uvct_amp_calls 1726 1727 # Turn these HELAS calls to the multiple-precision version of the HELAS 1728 # subroutines. 1729 self.turn_to_mp_calls(born_amps_and_wfs_calls) 1730 1731 file = open(os.path.join(self.template_dir,\ 1732 'mp_born_amps_and_wfs.inc')).read() 1733 # Decide here wether we need to split the loop_matrix.f file or not. 1734 if (not noSplit and (len(matrix_element.get_all_amplitudes())>2000)): 1735 file=self.split_HELASCALLS(writer,replace_dict,\ 1736 'mp_helas_calls_split.inc',file,\ 1737 born_amps_and_wfs_calls,'born_amps_and_wfs_calls',\ 1738 'mp_helas_calls') 1739 else: 1740 replace_dict['born_amps_and_wfs_calls']=\ 1741 '\n'.join(born_amps_and_wfs_calls) 1742 1743 file = file % replace_dict 1744 if writer: 1745 # Write the file 1746 writer.writelines(file) 1747 else: 1748 # Return it to be written along with the others 1749 return file 1750 1751 #=============================================================================== 1752 # LoopProcessOptimizedExporterFortranSA 1753 #=============================================================================== 1754
1755 -class LoopProcessOptimizedExporterFortranSA(LoopProcessExporterFortranSA):
1756      """Class to take care of exporting a set of loop matrix elements in the 
1757      Fortran format which exploits the Pozzorini method of representing 
1758      the loop numerators as polynomials to render their evaluation faster.""" 
1759   
1760      template_dir=os.path.join(_file_path,'iolibs/template_files/loop_optimized') 
1761      # The option below controls whether one wants to group together in one single 
1762      # CutTools/TIR call the loops with the same denominator structure 
1763      forbid_loop_grouping = False 
1764   
1765      # List of the potential TIR libraries one wants to link to. 
1766      # Golem and Samurai will typically be obtained from gosam_contrib, 
1767      # which might also contain a version of ninja. We must therefore 
1768      # make sure that ninja appears first in the list of -L because 
1769      # it is the tool for which the user is most likely to use 
1770      # a standalone version independent of gosam_contrib 
1771      all_tir=['pjfry','iregi','ninja','golem','samurai','collier'] 
1772 
1773 - def __init__(self, dir_path = "", opt=None):
1774          """Initialize the LoopProcessOptimizedExporterFortranSA with directory 
1775          information on where to find all the loop-related source files, 
1776          such as CutTools and the TIR libraries""" 
1777   
1778          super(LoopProcessOptimizedExporterFortranSA,self).__init__(dir_path, opt) 
1779   
1780          # TIR libraries available 
1781          self.tir_available_dict={'pjfry':True,'iregi':True,'golem':True, 
1782                                   'samurai':True,'ninja':True,'collier':True} 
1783   
1784          for tir in self.all_tir: 
1785              tir_dir="%s_dir"%tir 
1786              if tir_dir in self.opt and not self.opt[tir_dir] is None: 
1787                  # Make sure to resolve a relative path with respect to the current MG5aMC root. 
1788                  tir_path = self.opt[tir_dir].strip() 
1789                  if tir_path.startswith('.'): 
1790                      tir_path = os.path.abspath(pjoin(MG5DIR,tir_path)) 
1791                  setattr(self,tir_dir,tir_path) 
1792              else: 
1793                  setattr(self,tir_dir,'')
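# ------------------------------------------------------------------------------
# Illustration (standalone sketch, not part of the class): the loop above stores
# one '<tir>_dir' attribute per interfaced TIR library and interprets any path
# starting with '.' relative to the MG5aMC root directory. The helper below
# mimics that resolution rule in isolation; 'mg5_root' is a placeholder standing
# in for MG5DIR.
import os

def sketch_resolve_tir_path(tir_path, mg5_root):
    """Resolve a TIR directory option the way the exporter does."""
    tir_path = tir_path.strip()
    if tir_path.startswith('.'):
        # Relative paths are interpreted with respect to the MG5aMC root
        return os.path.abspath(os.path.join(mg5_root, tir_path))
    return tir_path

# e.g. './HEPTools/collier' becomes '<mg5_root>/HEPTools/collier', while an
# absolute path is kept untouched.
# ------------------------------------------------------------------------------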
1794
1795 - def copy_template(self, model):
1796 """Additional actions needed to setup the Template. 1797 """ 1798 1799 super(LoopProcessOptimizedExporterFortranSA, self).copy_template(model) 1800 1801 self.loop_optimized_additional_template_setup()
1802
1803 - def get_context(self,matrix_element, **opts):
1804          """ Additional contextual information which needs to be created for 
1805          the optimized output.""" 
1806   
1807          context = LoopProcessExporterFortranSA.get_context(self, matrix_element, 
1808                                                                          **opts) 
1809   
1810          # Check whether the linked Ninja installation supports quadruple precision 
1811          try: 
1812              context['ninja_supports_quad_prec'] = \ 
1813                        misc.get_ninja_quad_prec_support(getattr(self,'ninja_dir')) 
1814          except AttributeError: 
1815              context['ninja_supports_quad_prec'] = False 
1816   
1817          for tir in self.all_tir: 
1818              context['%s_available'%tir]=self.tir_available_dict[tir] 
1819              # safety check 
1820              if tir not in ['golem','pjfry','iregi','samurai','ninja','collier']: 
1821                  raise MadGraph5Error,"%s is not a currently interfaced TIR."%tir 
1822   
1823          return context
1824
1826 """ Perform additional actions specific for this class when setting 1827 up the template with the copy_template function.""" 1828 1829 # We must link the TIR to the Library folder of the active Template 1830 link_tir_libs=[] 1831 tir_libs=[] 1832 tir_include=[] 1833 1834 for tir in self.all_tir: 1835 tir_dir="%s_dir"%tir 1836 libpath=getattr(self,tir_dir) 1837 libname="lib%s.a"%tir 1838 tir_name=tir 1839 libpath = self.link_TIR(os.path.join(self.dir_path, 'lib'), 1840 libpath,libname,tir_name=tir_name) 1841 if libpath != "": 1842 if tir in ['ninja','pjfry','golem','samurai','collier']: 1843 # It is cleaner to use the original location of the libraries 1844 link_tir_libs.append('-L%s/ -l%s'%(libpath,tir)) 1845 tir_libs.append('%s/lib%s.$(libext)'%(libpath,tir)) 1846 # For Ninja, we must also link against OneLoop. 1847 if tir in ['ninja']: 1848 if not any(os.path.isfile(pjoin(libpath,'libavh_olo.%s'%ext)) 1849 for ext in ['a','dylib','so']): 1850 raise MadGraph5Error( 1851 "The OneLOop library 'libavh_olo.(a|dylib|so)' could no be found in path '%s'. Please place a symlink to it there."%libpath) 1852 link_tir_libs.append('-L%s/ -l%s'%(libpath,'avh_olo')) 1853 tir_libs.append('%s/lib%s.$(libext)'%(libpath,'avh_olo')) 1854 if tir in ['ninja','golem', 'samurai','collier']: 1855 trgt_path = pjoin(os.path.dirname(libpath),'include') 1856 if os.path.isdir(trgt_path): 1857 to_include = misc.find_includes_path(trgt_path, 1858 self.include_names[tir]) 1859 else: 1860 to_include = None 1861 # Special possible location for collier 1862 if to_include is None and tir=='collier': 1863 to_include = misc.find_includes_path( 1864 pjoin(libpath,'modules'),self.include_names[tir]) 1865 if to_include is None: 1866 logger.error( 1867 'Could not find the include directory for %s, looking in %s.\n' % (tir, str(trgt_path))+ 1868 'Generation carries on but you will need to edit the include path by hand in the makefiles.') 1869 to_include = '<Not_found_define_it_yourself>' 1870 tir_include.append('-I %s'%str(to_include)) 1871 # To be able to easily compile a MadLoop library using 1872 # makefiles built outside of the MG5_aMC framework 1873 # (such as what is done with the Sherpa interface), we 1874 # place here an easy handle on the golem includes 1875 name_map = {'golem':'golem95','samurai':'samurai', 1876 'ninja':'ninja','collier':'collier'} 1877 ln(to_include, starting_dir=pjoin(self.dir_path,'lib'), 1878 name='%s_include'%name_map[tir],abspath=True) 1879 ln(libpath, starting_dir=pjoin(self.dir_path,'lib'), 1880 name='%s_lib'%name_map[tir],abspath=True) 1881 else : 1882 link_tir_libs.append('-l%s'%tir) 1883 tir_libs.append('$(LIBDIR)lib%s.$(libext)'%tir) 1884 1885 MadLoop_makefile_definitions = pjoin(self.dir_path,'SubProcesses', 1886 'MadLoop_makefile_definitions') 1887 if os.path.isfile(MadLoop_makefile_definitions): 1888 os.remove(MadLoop_makefile_definitions) 1889 1890 calls = self.write_loop_makefile_definitions( 1891 writers.MakefileWriter(MadLoop_makefile_definitions), 1892 link_tir_libs,tir_libs, tir_include=tir_include) 1893 1894 # Finally overwrite MadLoopCommons.f now that we know the availibility of 1895 # COLLIER. 
1896 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone', 1897 "SubProcesses","MadLoopCommons.inc")).read() 1898 writer = writers.FortranWriter(os.path.join(self.dir_path, 1899 "SubProcesses","MadLoopCommons.f")) 1900 writer.writelines(MadLoopCommon%{ 1901 'print_banner_commands':self.MadLoop_banner}, context={ 1902 'collier_available':self.tir_available_dict['collier']}) 1903 writer.close()
1904 1916 1917 2045
2046 - def set_group_loops(self, matrix_element):
2047 """ Decides whether we must group loops or not for this matrix element""" 2048 2049 # Decide if loops sharing same denominator structures have to be grouped 2050 # together or not. 2051 if self.forbid_loop_grouping: 2052 self.group_loops = False 2053 else: 2054 self.group_loops = (not self.get_context(matrix_element)['ComputeColorFlows'])\ 2055 and matrix_element.get('processes')[0].get('has_born') 2056 2057 return self.group_loops
2058
2059 - def finalize(self, matrix_element, cmdhistory, MG5options, outputflag):
2060 """create the global information for loops""" 2061 2062 super(LoopProcessOptimizedExporterFortranSA,self).finalize(matrix_element, 2063 cmdhistory, MG5options, outputflag) 2064 self.write_global_specs(matrix_element)
2065 2066 2067
2068 - def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model, 2069 group_number = None, proc_id = None, config_map = None):
2070 """ Writes loop_matrix.f, CT_interface.f,TIR_interface.f,GOLEM_inteface.f 2071 and loop_num.f only but with the optimized FortranModel. 2072 The arguments group_number and proc_id are just for the LoopInduced 2073 output with MadEvent and only used in get_ME_identifier.""" 2074 2075 # Warn the user that the 'matrix' output where all relevant code is 2076 # put together in a single file is not supported in this loop output. 2077 if writer: 2078 raise MadGraph5Error, 'Matrix output mode no longer supported.' 2079 2080 if not isinstance(fortran_model,\ 2081 helas_call_writers.FortranUFOHelasCallWriter): 2082 raise MadGraph5Error, 'The optimized loop fortran output can only'+\ 2083 ' work with a UFO Fortran model' 2084 OptimizedFortranModel=\ 2085 helas_call_writers.FortranUFOHelasCallWriterOptimized(\ 2086 fortran_model.get('model'),False) 2087 2088 2089 if not matrix_element.get('processes')[0].get('has_born') and \ 2090 not self.compute_color_flows: 2091 logger.debug("Color flows will be employed despite the option"+\ 2092 " 'loop_color_flows' being set to False because it is necessary"+\ 2093 " for optimizations.") 2094 2095 # Compute the analytical information of the loop wavefunctions in the 2096 # loop helas matrix elements using the cached aloha model to reuse 2097 # as much as possible the aloha computations already performed for 2098 # writing out the aloha fortran subroutines. 2099 matrix_element.compute_all_analytic_information( 2100 self.get_aloha_model(matrix_element.get('processes')[0].get('model'))) 2101 2102 self.set_group_loops(matrix_element) 2103 2104 # Initialize a general replacement dictionary with entries common to 2105 # many files generated here. 2106 matrix_element.rep_dict = LoopProcessExporterFortranSA.\ 2107 generate_general_replace_dict(self, matrix_element, 2108 group_number = group_number, proc_id = proc_id) 2109 2110 # and those specific to the optimized output 2111 self.set_optimized_output_specific_replace_dict_entries(matrix_element) 2112 2113 # Create the necessary files for the loop matrix element subroutine 2114 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w') 2115 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix']) 2116 proc_prefix_writer.close() 2117 2118 filename = 'loop_matrix.f' 2119 calls = self.write_loopmatrix(writers.FortranWriter(filename), 2120 matrix_element, 2121 OptimizedFortranModel) 2122 2123 filename = 'check_sa.f' 2124 self.write_check_sa(writers.FortranWriter(filename),matrix_element) 2125 2126 filename = 'polynomial.f' 2127 calls = self.write_polynomial_subroutines( 2128 writers.FortranWriter(filename), 2129 matrix_element) 2130 2131 filename = 'improve_ps.f' 2132 calls = self.write_improve_ps(writers.FortranWriter(filename), 2133 matrix_element) 2134 2135 filename = 'CT_interface.f' 2136 self.write_CT_interface(writers.FortranWriter(filename),\ 2137 matrix_element) 2138 2139 filename = 'TIR_interface.f' 2140 self.write_TIR_interface(writers.FortranWriter(filename), 2141 matrix_element) 2142 2143 if 'golem' in self.tir_available_dict and self.tir_available_dict['golem']: 2144 filename = 'GOLEM_interface.f' 2145 self.write_GOLEM_interface(writers.FortranWriter(filename), 2146 matrix_element) 2147 2148 if 'collier' in self.tir_available_dict and self.tir_available_dict['collier']: 2149 filename = 'COLLIER_interface.f' 2150 self.write_COLLIER_interface(writers.FortranWriter(filename), 2151 matrix_element) 2152 2153 filename = 'loop_num.f' 2154 self.write_loop_num(writers.FortranWriter(filename),\ 2155 
matrix_element,OptimizedFortranModel) 2156 2157 filename = 'mp_compute_loop_coefs.f' 2158 self.write_mp_compute_loop_coefs(writers.FortranWriter(filename),\ 2159 matrix_element,OptimizedFortranModel) 2160 2161 if self.get_context(matrix_element)['ComputeColorFlows']: 2162 filename = 'compute_color_flows.f' 2163 self.write_compute_color_flows(writers.FortranWriter(filename), 2164 matrix_element, config_map = config_map) 2165 2166 # Extract number of external particles 2167 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 2168 filename = 'nexternal.inc' 2169 self.write_nexternal_file(writers.FortranWriter(filename), 2170 nexternal, ninitial) 2171 2172 # Write general process information 2173 filename = 'process_info.inc' 2174 self.write_process_info_file(writers.FortranWriter(filename), 2175 matrix_element) 2176 2177 if self.get_context(matrix_element)['TIRCaching']: 2178 filename = 'tir_cache_size.inc' 2179 self.write_tir_cache_size_include(writers.FortranWriter(filename)) 2180 2181 return calls
2182
2183 - def set_optimized_output_specific_replace_dict_entries(self, matrix_element):
2184          """ Specify the entries of the replacement dictionary which are specific 
2185          to the optimized output and only relevant to it (the more general entries 
2186          are set in the mother class LoopProcessExporterFortranSA).""" 
2187   
2188          max_loop_rank=matrix_element.get_max_loop_rank() 
2189          matrix_element.rep_dict['maxrank']=max_loop_rank 
2190          matrix_element.rep_dict['loop_max_coefs']=\ 
2191                          q_polynomial.get_number_of_coefs_for_rank(max_loop_rank) 
2192          max_loop_vertex_rank=matrix_element.get_max_loop_vertex_rank() 
2193          matrix_element.rep_dict['vertex_max_coefs']=\ 
2194                   q_polynomial.get_number_of_coefs_for_rank(max_loop_vertex_rank) 
2195   
2196          matrix_element.rep_dict['nloopwavefuncs']=\ 
2197                                 matrix_element.get_number_of_loop_wavefunctions() 
2198          max_spin=matrix_element.get_max_loop_particle_spin() 
2199   
2200          matrix_element.rep_dict['max_lwf_size']= 4 if max_spin <=3 else 16 
2201          matrix_element.rep_dict['nloops']=len(\ 
2202                          [1 for ldiag in matrix_element.get_loop_diagrams() for \ 
2203                                           lamp in ldiag.get_loop_amplitudes()]) 
2204   
2205          if self.set_group_loops(matrix_element): 
2206              matrix_element.rep_dict['nloop_groups']=\ 
2207                                         len(matrix_element.get('loop_groups')) 
2208          else: 
2209              matrix_element.rep_dict['nloop_groups']=\ 
2210                                            matrix_element.rep_dict['nloops']
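# ------------------------------------------------------------------------------
# Illustration (standalone sketch): the 'loop_max_coefs' and 'vertex_max_coefs'
# entries count the coefficients of a polynomial in the four loop-momentum
# components up to a given rank. Under that assumption the count is the number
# of symmetric monomials q^{mu_1}...q^{mu_r} with r <= R, i.e. binomial(R+4,4);
# the helper below only makes that counting explicit and is not the actual
# q_polynomial implementation.
def sketch_n_coefs_for_rank(max_rank):
    """Number of coefficients of a rank <= max_rank polynomial in 4 variables."""
    total = 0
    for r in range(max_rank + 1):
        # Number of symmetric rank-r monomials in 4 components: C(r+3,3)
        total += (r + 1) * (r + 2) * (r + 3) // 6
    return total

# Rank 0 -> 1 (constant), rank 1 -> 5 (constant + 4 components), rank 2 -> 15, ...
assert [sketch_n_coefs_for_rank(r) for r in range(4)] == [1, 5, 15, 35]
# ------------------------------------------------------------------------------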
2211
2212 - def write_loop_num(self, writer, matrix_element,fortran_model):
2213 """ Create the file containing the core subroutine called by CutTools 2214 which contains the Helas calls building the loop""" 2215 2216 replace_dict=copy.copy(matrix_element.rep_dict) 2217 2218 file = open(os.path.join(self.template_dir,'loop_num.inc')).read() 2219 file = file % replace_dict 2220 writer.writelines(file,context=self.get_context(matrix_element))
2221
2222 - def write_CT_interface(self, writer, matrix_element):
2223 """ We can re-use the mother one for the loop optimized output.""" 2224 LoopProcessExporterFortranSA.write_CT_interface(\ 2225 self, writer, matrix_element,optimized_output=True)
2226
2227 - def write_TIR_interface(self, writer, matrix_element):
2228 """ Create the file TIR_interface.f which does NOT contain the subroutine 2229 defining the loop HELAS-like calls along with the general interfacing 2230 subroutine. """ 2231 2232 # First write TIR_interface which interfaces MG5 with TIR. 2233 replace_dict=copy.copy(matrix_element.rep_dict) 2234 2235 file = open(os.path.join(self.template_dir,'TIR_interface.inc')).read() 2236 2237 # Check which loops have an Higgs effective vertex so as to correctly 2238 # implement CutTools limitation 2239 loop_groups = matrix_element.get('loop_groups') 2240 has_HEFT_vertex = [False]*len(loop_groups) 2241 for i, (denom_structure, loop_amp_list) in enumerate(loop_groups): 2242 for lamp in loop_amp_list: 2243 final_lwf = lamp.get_final_loop_wavefunction() 2244 while not final_lwf is None: 2245 # We define here an HEFT vertex as any vertex built up from 2246 # only massless vectors and scalars (at least one of each) 2247 scalars = len([1 for wf in final_lwf.get('mothers') if 2248 wf.get('spin')==1]) 2249 vectors = len([1 for wf in final_lwf.get('mothers') if 2250 wf.get('spin')==3 and wf.get('mass')=='ZERO']) 2251 if scalars>=1 and vectors>=1 and \ 2252 scalars+vectors == len(final_lwf.get('mothers')): 2253 has_HEFT_vertex[i] = True 2254 break 2255 final_lwf = final_lwf.get_loop_mother() 2256 else: 2257 continue 2258 break 2259 2260 has_HEFT_list = [] 2261 chunk_size = 9 2262 for k in xrange(0, len(has_HEFT_vertex), chunk_size): 2263 has_HEFT_list.append("DATA (HAS_AN_HEFT_VERTEX(I),I=%6r,%6r) /%s/" % \ 2264 (k + 1, min(k + chunk_size, len(has_HEFT_vertex)), 2265 ','.join(['.TRUE.' if l else '.FALSE.' for l in 2266 has_HEFT_vertex[k:k + chunk_size]]))) 2267 replace_dict['has_HEFT_list'] = '\n'.join(has_HEFT_list) 2268 2269 file = file % replace_dict 2270 2271 FPR = q_polynomial.FortranPolynomialRoutines( 2272 replace_dict['maxrank'],coef_format=replace_dict['complex_dp_format'],\ 2273 sub_prefix=replace_dict['proc_prefix']) 2274 if self.tir_available_dict['pjfry']: 2275 file += '\n\n'+FPR.write_pjfry_mapping() 2276 if self.tir_available_dict['iregi']: 2277 file += '\n\n'+FPR.write_iregi_mapping() 2278 2279 if writer: 2280 writer.writelines(file,context=self.get_context(matrix_element)) 2281 else: 2282 return file
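# ------------------------------------------------------------------------------
# Illustration (standalone sketch): the HEFT-vertex flags computed in
# write_TIR_interface above are emitted as Fortran DATA statements in chunks of
# 9 entries per line. The helper below reproduces just that formatting step for
# an arbitrary list of booleans; the array name 'HAS_AN_HEFT_VERTEX' matches the
# one used in the template, everything else is illustrative.
def sketch_fortran_logical_data(flags, array_name='HAS_AN_HEFT_VERTEX', chunk_size=9):
    """Format a list of booleans as Fortran DATA statements, chunk_size per line."""
    lines = []
    for k in range(0, len(flags), chunk_size):
        chunk = flags[k:k + chunk_size]
        lines.append("DATA (%s(I),I=%6r,%6r) /%s/" % (
            array_name, k + 1, min(k + chunk_size, len(flags)),
            ','.join('.TRUE.' if f else '.FALSE.' for f in chunk)))
    return lines

# For 11 loop groups this yields two DATA lines, covering entries 1-9 and 10-11.
# ------------------------------------------------------------------------------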
2283
2284 - def write_COLLIER_interface(self, writer, matrix_element):
2285          """ Create the file COLLIER_interface.f""" 
2286   
2287          # First write COLLIER_interface which interfaces MG5 with COLLIER. 
2288          replace_dict=copy.copy(matrix_element.rep_dict) 
2289   
2290          file = open(os.path.join(self.template_dir,'COLLIER_interface.inc')).read() 
2291   
2292          FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\ 
2293                 coef_format=replace_dict['complex_dp_format'],\ 
2294                 sub_prefix=replace_dict['proc_prefix']) 
2295          map_definition = [] 
2296          collier_map = FPR.get_COLLIER_mapping() 
2297   
2298          chunk_size = 10 
2299          for map_name, indices_list in \ 
2300              [('COEFMAP_ZERO',[c[0] for c in collier_map]), 
2301               ('COEFMAP_ONE',[c[1] for c in collier_map]), 
2302               ('COEFMAP_TWO',[c[2] for c in collier_map]), 
2303               ('COEFMAP_THREE',[c[3] for c in collier_map])]: 
2304              for k in xrange(0, len(indices_list), chunk_size): 
2305                  map_definition.append("DATA (%s(I),I=%3r,%3r) /%s/" % \ 
2306                    (map_name,k, min(k + chunk_size, len(indices_list))-1, 
2307                     ','.join('%2r'%ind for ind in indices_list[k:k + chunk_size]))) 
2308   
2309          replace_dict['collier_coefmap'] = '\n'.join(map_definition) 
2310   
2311          file = file % replace_dict 
2312   
2313          if writer: 
2314              writer.writelines(file,context=self.get_context(matrix_element)) 
2315          else: 
2316              return file
2317
2318 - def write_GOLEM_interface(self, writer, matrix_element):
2319          """ Create the file GOLEM_interface.f which, unlike CT_interface.f, does 
2320          NOT contain the subroutines defining the loop HELAS-like calls but only 
2321          the general interfacing subroutine. """ 
2322   
2323          # First write GOLEM_interface which interfaces MG5 with TIR. 
2324          replace_dict=copy.copy(matrix_element.rep_dict) 
2325   
2326          # We finalize the TIR result differently depending on whether we used 
2327          # the built-in squaring against the born. 
2328          if not self.get_context(matrix_element)['AmplitudeReduction']: 
2329              replace_dict['loop_induced_sqsoindex']=',SQSOINDEX' 
2330          else: 
2331              replace_dict['loop_induced_sqsoindex']='' 
2332   
2333          file = open(os.path.join(self.template_dir,'GOLEM_interface.inc')).read() 
2334   
2335          file = file % replace_dict 
2336   
2337          FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\ 
2338                 coef_format=replace_dict['complex_dp_format'],\ 
2339                 sub_prefix=replace_dict['proc_prefix']) 
2340   
2341          file += '\n\n'+FPR.write_golem95_mapping() 
2342   
2343          if writer: 
2344              writer.writelines(file,context=self.get_context(matrix_element)) 
2345          else: 
2346              return file
2347
2348 - def write_polynomial_subroutines(self,writer,matrix_element):
2349 """ Subroutine to create all the subroutines relevant for handling 2350 the polynomials representing the loop numerator """ 2351 2352 # First create 'loop_max_coefs.inc' 2353 IncWriter=writers.FortranWriter('loop_max_coefs.inc','w') 2354 IncWriter.writelines("""INTEGER LOOPMAXCOEFS 2355 PARAMETER (LOOPMAXCOEFS=%(loop_max_coefs)d)""" 2356 %matrix_element.rep_dict) 2357 2358 # Then coef_specs directly in DHELAS if it does not exist already 2359 # 'coef_specs.inc'. If several processes exported different files there, 2360 # it is fine because the overall maximum value will overwrite it in the 2361 # end 2362 coef_specs_path = pjoin(self.dir_path, 'Source','DHELAS','coef_specs.inc') 2363 if not os.path.isfile(coef_specs_path): 2364 IncWriter=writers.FortranWriter(coef_specs_path,'w') 2365 IncWriter.writelines("""INTEGER MAXLWFSIZE 2366 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d) 2367 INTEGER VERTEXMAXCOEFS 2368 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\ 2369 %matrix_element.rep_dict) 2370 IncWriter.close() 2371 2372 # List of all subroutines to place there 2373 subroutines=[] 2374 2375 # Start from the routine in the template 2376 replace_dict = copy.copy(matrix_element.rep_dict) 2377 2378 dp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read() 2379 mp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read() 2380 # The double precision version of the basic polynomial routines, such as 2381 # create_loop_coefs 2382 replace_dict['complex_format'] = replace_dict['complex_dp_format'] 2383 replace_dict['real_format'] = replace_dict['real_dp_format'] 2384 replace_dict['mp_prefix'] = '' 2385 replace_dict['kind'] = 8 2386 replace_dict['zero_def'] = '0.0d0' 2387 replace_dict['one_def'] = '1.0d0' 2388 dp_routine = dp_routine % replace_dict 2389 # The quadruple precision version of the basic polynomial routines 2390 replace_dict['complex_format'] = replace_dict['complex_mp_format'] 2391 replace_dict['real_format'] = replace_dict['real_mp_format'] 2392 replace_dict['mp_prefix'] = 'MP_' 2393 replace_dict['kind'] = 16 2394 replace_dict['zero_def'] = '0.0e0_16' 2395 replace_dict['one_def'] = '1.0e0_16' 2396 mp_routine = mp_routine % replace_dict 2397 subroutines.append(dp_routine) 2398 subroutines.append(mp_routine) 2399 2400 # Initialize the polynomial routine writer 2401 poly_writer=q_polynomial.FortranPolynomialRoutines( 2402 matrix_element.get_max_loop_rank(), 2403 updater_max_rank = matrix_element.get_max_loop_vertex_rank(), 2404 sub_prefix=replace_dict['proc_prefix'], 2405 proc_prefix=replace_dict['proc_prefix'], 2406 mp_prefix='') 2407 # Write the polynomial constant module common to all 2408 writer.writelines(poly_writer.write_polynomial_constant_module()+'\n') 2409 2410 mp_poly_writer=q_polynomial.FortranPolynomialRoutines( 2411 matrix_element.get_max_loop_rank(), 2412 updater_max_rank = matrix_element.get_max_loop_vertex_rank(), 2413 coef_format='complex*32', sub_prefix='MP_'+replace_dict['proc_prefix'], 2414 proc_prefix=replace_dict['proc_prefix'], mp_prefix='MP_') 2415 # The eval subroutine 2416 subroutines.append(poly_writer.write_polynomial_evaluator()) 2417 subroutines.append(mp_poly_writer.write_polynomial_evaluator()) 2418 # The add coefs subroutine 2419 subroutines.append(poly_writer.write_add_coefs()) 2420 subroutines.append(mp_poly_writer.write_add_coefs()) 2421 # The merging one for creating the loop coefficients 2422 subroutines.append(poly_writer.write_wl_merger()) 2423 subroutines.append(mp_poly_writer.write_wl_merger()) 2424 for wl_update 
in matrix_element.get_used_wl_updates(): 
2425              # We pick here the most appropriate way of computing the 
2426              # tensor product depending on the rank of the two tensors. 
2427              # The various choices below come from a careful comparison of 
2428              # the different methods using the valgrind profiler 
2429              if wl_update[0]==wl_update[1]==1 or wl_update[0]==0 or wl_update[1]==0: 
2430                  # If either rank is 0, or if they are both equal to 1, 
2431                  # then we are better off using the fully expanded polynomial 
2432                  # and letting the compiler optimize it. 
2433                  subroutines.append(poly_writer.write_expanded_wl_updater(\ 
2434                                                     wl_update[0],wl_update[1])) 
2435                  subroutines.append(mp_poly_writer.write_expanded_wl_updater(\ 
2436                                                     wl_update[0],wl_update[1])) 
2437              elif wl_update[0] >= wl_update[1]: 
2438                  # If the loop polynomial is larger, then we filter and loop 
2439                  # over the vertex coefficients first. The smallest product for 
2440                  # which the routines below could be used is then 
2441                  # loop_rank_2 x vertex_rank_1 
2442                  subroutines.append(poly_writer.write_compact_wl_updater(\ 
2443                           wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True)) 
2444                  subroutines.append(mp_poly_writer.write_compact_wl_updater(\ 
2445                           wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True)) 
2446              else: 
2447                  # This happens only when the rank of the updater (vertex coef) 
2448                  # is larger than the one of the loop coef and neither of them is 
2449                  # zero. This never happens in renormalizable theories but it 
2450                  # can happen in HEFT or other effective theories. In this 
2451                  # case the typical use of this routine is for the product 
2452                  # loop_rank_1 x vertex_rank_2 
2453                  subroutines.append(poly_writer.write_compact_wl_updater(\ 
2454                          wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False)) 
2455                  subroutines.append(mp_poly_writer.write_compact_wl_updater(\ 
2456                          wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False)) 
2457   
2458          writer.writelines('\n\n'.join(subroutines), 
2459                                        context=self.get_context(matrix_element))
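# ------------------------------------------------------------------------------
# Illustration (standalone sketch): the branching above selects, for each pair
# (loop_rank, vertex_rank) returned by get_used_wl_updates(), which flavour of
# polynomial-update routine gets written out. The function below only restates
# that decision logic with symbolic return values; it does not generate any
# Fortran code.
def sketch_wl_updater_choice(loop_rank, vertex_rank):
    """Mirror the strategy choice made when writing the wl updater routines."""
    if loop_rank == vertex_rank == 1 or loop_rank == 0 or vertex_rank == 0:
        # Fully expanded product, left to the Fortran compiler to optimize
        return 'expanded'
    elif loop_rank >= vertex_rank:
        # Compact routine looping over the (smaller) vertex coefficients first
        return ('compact', 'vertex_coefs_first')
    else:
        # Higher-rank updaters, typical of effective (e.g. HEFT) interactions
        return ('compact', 'loop_coefs_first')

assert sketch_wl_updater_choice(1, 1) == 'expanded'
assert sketch_wl_updater_choice(2, 1) == ('compact', 'vertex_coefs_first')
assert sketch_wl_updater_choice(1, 2) == ('compact', 'loop_coefs_first')
# ------------------------------------------------------------------------------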
2460
2461 - def write_mp_compute_loop_coefs(self, writer, matrix_element, fortran_model):
2462 """Create the write_mp_compute_loop_coefs.f file.""" 2463 2464 if not matrix_element.get('processes') or \ 2465 not matrix_element.get('diagrams'): 2466 return 0 2467 2468 # Set lowercase/uppercase Fortran code 2469 2470 writers.FortranWriter.downcase = False 2471 2472 replace_dict = copy.copy(matrix_element.rep_dict) 2473 2474 # Extract helas calls 2475 squared_orders = matrix_element.get_squared_order_contribs() 2476 split_orders = matrix_element.get('processes')[0].get('split_orders') 2477 2478 born_ct_helas_calls , uvct_helas_calls = \ 2479 fortran_model.get_born_ct_helas_calls(matrix_element, 2480 squared_orders=squared_orders, split_orders=split_orders) 2481 self.turn_to_mp_calls(born_ct_helas_calls) 2482 self.turn_to_mp_calls(uvct_helas_calls) 2483 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\ 2484 matrix_element,group_loops=self.group_loops, 2485 squared_orders=squared_orders,split_orders=split_orders) 2486 # The proc_prefix must be replaced 2487 coef_construction = [c % matrix_element.rep_dict for c 2488 in coef_construction] 2489 self.turn_to_mp_calls(coef_construction) 2490 self.turn_to_mp_calls(coef_merging) 2491 2492 file = open(os.path.join(self.template_dir,\ 2493 'mp_compute_loop_coefs.inc')).read() 2494 2495 # Setup the contextual environment which is used in the splitting 2496 # functions below 2497 context = self.get_context(matrix_element) 2498 file=self.split_HELASCALLS(writer,replace_dict,\ 2499 'mp_helas_calls_split.inc',file,born_ct_helas_calls,\ 2500 'mp_born_ct_helas_calls','mp_helas_calls_ampb', 2501 required_so_broadcaster = 'MP_CT_REQ_SO_DONE', 2502 continue_label = 2000, 2503 momenta_array_name = 'MP_P', 2504 context=context) 2505 file=self.split_HELASCALLS(writer,replace_dict,\ 2506 'mp_helas_calls_split.inc',file,uvct_helas_calls,\ 2507 'mp_uvct_helas_calls','mp_helas_calls_uvct', 2508 required_so_broadcaster = 'MP_UVCT_REQ_SO_DONE', 2509 continue_label = 3000, 2510 momenta_array_name = 'MP_P', 2511 context=context) 2512 file=self.split_HELASCALLS(writer,replace_dict,\ 2513 'mp_helas_calls_split.inc',file,coef_construction,\ 2514 'mp_coef_construction','mp_coef_construction', 2515 required_so_broadcaster = 'MP_LOOP_REQ_SO_DONE', 2516 continue_label = 4000, 2517 momenta_array_name = 'MP_P', 2518 context=context) 2519 2520 replace_dict['mp_coef_merging']='\n'.join(coef_merging) 2521 2522 file = file % replace_dict 2523 2524 # Write the file 2525 writer.writelines(file,context=context)
2526
2527 - def write_color_matrix_data_file(self, writer, col_matrix):
2528 """Writes out the files (Loop|Born)ColorFlowMatrix.dat corresponding 2529 to the color coefficients for JAMP(L|B)*JAMP(L|B).""" 2530 2531 res = [] 2532 for line in range(len(col_matrix._col_basis1)): 2533 numerators = [] 2534 denominators = [] 2535 for row in range(len(col_matrix._col_basis2)): 2536 coeff = col_matrix.col_matrix_fixed_Nc[(line,row)] 2537 numerators.append('%6r'%coeff[0].numerator) 2538 denominators.append('%6r'%( 2539 coeff[0].denominator*(-1 if coeff[1] else 1))) 2540 res.append(' '.join(numerators)) 2541 res.append(' '.join(denominators)) 2542 2543 res.append('EOF') 2544 2545 writer.writelines('\n'.join(res))
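# ------------------------------------------------------------------------------
# Illustration (standalone sketch): each color-flow matrix row is written above
# as two whitespace-separated lines, the numerators first and then the
# denominators (with the overall sign folded into the denominator), the whole
# file being terminated by an 'EOF' line. The reader below exists only for this
# documentation and turns that layout back into rows of fractions.
import fractions

def sketch_read_color_flow_matrix(lines):
    """Rebuild rows of Fraction coefficients from the .dat layout above."""
    rows = []
    it = iter(lines)
    for num_line in it:
        if num_line.strip() == 'EOF':
            break
        den_line = next(it)
        rows.append([fractions.Fraction(int(n), int(d)) for n, d in
                     zip(num_line.split(), den_line.split())])
    return rows

_rows = sketch_read_color_flow_matrix(['     2      1', '     3     -3', 'EOF'])
assert _rows == [[fractions.Fraction(2, 3), fractions.Fraction(-1, 3)]]
# ------------------------------------------------------------------------------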
2546
2547 - def write_color_flow_coefs_data_file(self, writer, color_amplitudes, 2548 color_basis):
2549 """ Writes the file '(Loop|Born)ColorFlowCoefs.dat using the coefficients 2550 list of the color_amplitudes in the argument of this function.""" 2551 2552 my_cs = color.ColorString() 2553 2554 res = [] 2555 2556 for jamp_number, coeff_list in enumerate(color_amplitudes): 2557 my_cs.from_immutable(sorted(color_basis.keys())[jamp_number]) 2558 # Order the ColorString so that its ordering is canonical. 2559 ordered_cs = color.ColorFactor([my_cs]).full_simplify()[0] 2560 res.append('%d # Coefficient for flow number %d with expr. %s'\ 2561 %(len(coeff_list), jamp_number+1, repr(ordered_cs))) 2562 # A line element is a tuple (numerator, denominator, amplitude_id) 2563 line_element = [] 2564 2565 for (coefficient, amp_number) in coeff_list: 2566 coef = self.cat_coeff(\ 2567 coefficient[0],coefficient[1],coefficient[2],coefficient[3]) 2568 line_element.append((coef[0].numerator, 2569 coef[0].denominator*(-1 if coef[1] else 1),amp_number)) 2570 # Sort them by growing amplitude number 2571 line_element.sort(key=lambda el:el[2]) 2572 2573 for i in range(3): 2574 res.append(' '.join('%6r'%elem[i] for elem in line_element)) 2575 2576 res.append('EOF') 2577 writer.writelines('\n'.join(res))
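# ------------------------------------------------------------------------------
# Illustration (standalone sketch): for every color flow the file written above
# contains a header line with the number of contributing amplitudes, followed by
# three aligned lines giving the numerators, the signed denominators and the
# amplitude numbers, sorted by amplitude number. The helper below formats one
# such flow from plain (numerator, signed_denominator, amp_number) tuples and is
# not the exporter's own writer; the 'comment' argument is a placeholder for the
# color-string expression printed in the real file.
def sketch_format_color_flow(flow_number, line_elements, comment='N/A'):
    """Return the four lines describing one color flow in the .dat file."""
    line_elements = sorted(line_elements, key=lambda el: el[2])
    lines = ['%d # Coefficient for flow number %d with expr. %s' %
             (len(line_elements), flow_number, comment)]
    for i in range(3):
        # Line 0: numerators, line 1: signed denominators, line 2: amplitude numbers
        lines.append(' '.join('%6r' % elem[i] for elem in line_elements))
    return lines

# Two amplitudes contributing to flow 1 with coefficients 1/3 and -1/9:
# sketch_format_color_flow(1, [(1, -9, 4), (1, 3, 2)]) lists amplitude 2 first.
# ------------------------------------------------------------------------------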
2578
2579 - def write_compute_color_flows(self, writer, matrix_element, config_map):
2580 """Writes the file compute_color_flows.f which uses the AMPL results 2581 from a common block to project them onto the color flow space so as 2582 to compute the JAMP quantities. For loop induced processes, this file 2583 will also contain a subroutine computing AMPL**2 for madevent 2584 multichanneling.""" 2585 2586 loop_col_amps = matrix_element.get_loop_color_amplitudes() 2587 matrix_element.rep_dict['nLoopFlows'] = len(loop_col_amps) 2588 2589 dat_writer = open(pjoin('..','MadLoop5_resources', 2590 '%(proc_prefix)sLoopColorFlowCoefs.dat' 2591 %matrix_element.rep_dict),'w') 2592 self.write_color_flow_coefs_data_file(dat_writer, 2593 loop_col_amps, matrix_element.get('loop_color_basis')) 2594 dat_writer.close() 2595 2596 dat_writer = open(pjoin('..','MadLoop5_resources', 2597 '%(proc_prefix)sLoopColorFlowMatrix.dat' 2598 %matrix_element.rep_dict),'w') 2599 self.write_color_matrix_data_file(dat_writer, 2600 matrix_element.get('color_matrix')) 2601 dat_writer.close() 2602 2603 if matrix_element.get('processes')[0].get('has_born'): 2604 born_col_amps = matrix_element.get_born_color_amplitudes() 2605 matrix_element.rep_dict['nBornFlows'] = len(born_col_amps) 2606 dat_writer = open(pjoin('..','MadLoop5_resources', 2607 '%(proc_prefix)sBornColorFlowCoefs.dat' 2608 %matrix_element.rep_dict),'w') 2609 self.write_color_flow_coefs_data_file(dat_writer, 2610 born_col_amps, matrix_element.get('born_color_basis')) 2611 dat_writer.close() 2612 2613 dat_writer = open(pjoin('..','MadLoop5_resources', 2614 '%(proc_prefix)sBornColorFlowMatrix.dat' 2615 %matrix_element.rep_dict),'w') 2616 self.write_color_matrix_data_file(dat_writer, 2617 color_amp.ColorMatrix(matrix_element.get('born_color_basis'))) 2618 dat_writer.close() 2619 else: 2620 matrix_element.rep_dict['nBornFlows'] = 0 2621 2622 replace_dict = copy.copy(matrix_element.rep_dict) 2623 2624 # The following variables only have to be defined for the LoopInduced 2625 # output for madevent. 2626 if self.get_context(matrix_element)['MadEventOutput']: 2627 self.get_amp2_lines(matrix_element, replace_dict, config_map) 2628 else: 2629 replace_dict['config_map_definition'] = '' 2630 replace_dict['config_index_map_definition'] = '' 2631 replace_dict['nmultichannels'] = 0 2632 replace_dict['nmultichannel_configs'] = 0 2633 2634 # The nmultichannels entry will be used in the matrix<i> wrappers as 2635 # well, so we add it to the general_replace_dict too. 2636 matrix_element.rep_dict['nmultichannels'] = \ 2637 replace_dict['nmultichannels'] 2638 matrix_element.rep_dict['nmultichannel_configs'] = \ 2639 replace_dict['nmultichannel_configs'] 2640 2641 2642 file = open(os.path.join(self.template_dir,\ 2643 'compute_color_flows.inc')).read()%replace_dict 2644 2645 writer.writelines(file,context=self.get_context(matrix_element))
2646
2647 - def write_global_specs(self, matrix_element_list, output_path=None):
2648 """ From the list of matrix element, or the single matrix element, derive 2649 the global quantities to write in global_coef_specs.inc""" 2650 2651 if isinstance(matrix_element_list, (group_subprocs.SubProcessGroupList, 2652 loop_helas_objects.LoopHelasProcess)): 2653 matrix_element_list = matrix_element_list.get_matrix_elements() 2654 2655 if isinstance(matrix_element_list, list): 2656 me_list = matrix_element_list 2657 else: 2658 me_list = [matrix_element_list] 2659 2660 if output_path is None: 2661 out_path = pjoin(self.dir_path,'SubProcesses','global_specs.inc') 2662 else: 2663 out_path = output_path 2664 2665 open(out_path,'w').write( 2666 """ integer MAXNEXTERNAL 2667 parameter(MAXNEXTERNAL=%d) 2668 integer OVERALLMAXRANK 2669 parameter(OVERALLMAXRANK=%d) 2670 integer NPROCS 2671 parameter(NPROCS=%d)"""%( 2672 max(me.get_nexternal_ninitial()[0] for me in me_list), 2673 max(me.get_max_loop_rank() for me in me_list), 2674 len(me_list)))
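# ------------------------------------------------------------------------------
# Illustration (standalone sketch): global_specs.inc only stores three maxima
# taken over all exported matrix elements. The snippet below mimics that
# reduction for hypothetical processes described by plain (nexternal, maxrank)
# tuples; the real code queries the matrix element objects directly.
def sketch_global_specs(process_specs):
    """Return (MAXNEXTERNAL, OVERALLMAXRANK, NPROCS) from (nexternal, maxrank) tuples."""
    return (max(n for n, _ in process_specs),
            max(r for _, r in process_specs),
            len(process_specs))

# e.g. a 2->2 loop process of rank 2 exported together with a 2->3 one of rank 3:
assert sketch_global_specs([(4, 2), (5, 3)]) == (5, 3, 2)
# ------------------------------------------------------------------------------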
2675 2676
2677 - def fix_coef_specs(self, overall_max_lwf_spin, overall_max_loop_vert_rank):
2678          """ If processes with different maximum loop wavefunction size or 
2679          different maximum loop vertex rank have to be output together, then 
2680          the file 'coef_specs.inc' in the HELAS Source folder must contain the 
2681          overall maximum of these quantities. This is not strictly safe, though, 
2682          and the user has been appropriately warned at the output stage """ 
2683   
2684          # Remove the existing link 
2685          coef_specs_path=os.path.join(self.dir_path,'Source','DHELAS',\ 
2686                                                             'coef_specs.inc') 
2687          os.remove(coef_specs_path) 
2688   
2689          spin_to_wf_size = {1:4,2:4,3:4,4:16,5:16} 
2690          overall_max_lwf_size = spin_to_wf_size[overall_max_lwf_spin] 
2691          overall_max_loop_vert_coefs = q_polynomial.get_number_of_coefs_for_rank( 
2692                                                       overall_max_loop_vert_rank) 
2693          # Replace it with the appropriate values 
2694          IncWriter=writers.FortranWriter(coef_specs_path,'w') 
2695          IncWriter.writelines("""INTEGER MAXLWFSIZE 
2696                             PARAMETER (MAXLWFSIZE=%(max_lwf_size)d) 
2697                             INTEGER VERTEXMAXCOEFS 
2698                             PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\ 
2699                             %{'max_lwf_size':overall_max_lwf_size, 
2700                               'vertex_max_coefs':overall_max_loop_vert_coefs}) 
2701          IncWriter.close()
2702
2703 - def setup_check_sa_replacement_dictionary(self, matrix_element, \ 2704 split_orders,squared_orders,amps_orders):
2705 """ Sets up the replacement dictionary for the writeout of the steering 2706 file check_sa.f""" 2707 if len(squared_orders)<1: 2708 matrix_element.rep_dict['print_so_loop_results']=\ 2709 "write(*,*) 'No split orders defined.'" 2710 elif len(squared_orders)==1: 2711 matrix_element.rep_dict['set_coupling_target']='' 2712 matrix_element.rep_dict['print_so_loop_results']=\ 2713 "write(*,*) 'All loop contributions are of split orders (%s)'"%( 2714 ' '.join(['%s=%d'%(split_orders[i],squared_orders[0][i]) \ 2715 for i in range(len(split_orders))])) 2716 else: 2717 matrix_element.rep_dict['set_coupling_target']='\n'.join([ 2718 '# Here we leave the default target squared split order to -1, meaning that we'+ 2719 ' aim at computing all individual contributions. You can choose otherwise.', 2720 'call %(proc_prefix)sSET_COUPLINGORDERS_TARGET(-1)'%matrix_element.rep_dict]) 2721 matrix_element.rep_dict['print_so_loop_results'] = '\n'.join([ 2722 '\n'.join(["write(*,*) '%dL) Loop ME for orders (%s) :'"%((j+1),(' '.join( 2723 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]))), 2724 "IF (PREC_FOUND(%d).NE.-1.0d0) THEN"%(j+1), 2725 "write(*,*) ' > accuracy = ',PREC_FOUND(%d)"%(j+1), 2726 "ELSE", 2727 "write(*,*) ' > accuracy = NA'", 2728 "ENDIF", 2729 "write(*,*) ' > finite = ',MATELEM(1,%d)"%(j+1), 2730 "write(*,*) ' > 1eps = ',MATELEM(2,%d)"%(j+1), 2731 "write(*,*) ' > 2eps = ',MATELEM(3,%d)"%(j+1) 2732 ]) for j, so in enumerate(squared_orders)]) 2733 matrix_element.rep_dict['write_so_loop_results'] = '\n'.join( 2734 ["write (69,*) 'Split_Orders_Names %s'"%(' '.join(split_orders))]+ 2735 ['\n'.join([ 2736 "write (69,*) 'Loop_SO_Results %s'"%(' '.join( 2737 ['%d'%so_value for so_value in so])), 2738 "write (69,*) 'SO_Loop ACC ',PREC_FOUND(%d)"%(j+1), 2739 "write (69,*) 'SO_Loop FIN ',MATELEM(1,%d)"%(j+1), 2740 "write (69,*) 'SO_Loop 1EPS ',MATELEM(2,%d)"%(j+1), 2741 "write (69,*) 'SO_Loop 2EPS ',MATELEM(3,%d)"%(j+1), 2742 ]) for j, so in enumerate(squared_orders)]) 2743 2744 # We must reconstruct here the born squared orders. 
2745 squared_born_so_orders = [] 2746 for i, amp_order in enumerate(amps_orders['born_amp_orders']): 2747 for j in range(0,i+1): 2748 key = tuple([ord1 + ord2 for ord1,ord2 in \ 2749 zip(amp_order[0],amps_orders['born_amp_orders'][j][0])]) 2750 if not key in squared_born_so_orders: 2751 squared_born_so_orders.append(key) 2752 if len(squared_born_so_orders)<1: 2753 matrix_element.rep_dict['print_so_born_results'] = '' 2754 elif len(squared_born_so_orders)==1: 2755 matrix_element.rep_dict['print_so_born_results'] = \ 2756 "write(*,*) 'All Born contributions are of split orders (%s)'"%( 2757 ' '.join(['%s=%d'%(split_orders[i],squared_born_so_orders[0][i]) 2758 for i in range(len(split_orders))])) 2759 else: 2760 matrix_element.rep_dict['print_so_born_results'] = '\n'.join([ 2761 "write(*,*) '%dB) Born ME for orders (%s) = ',MATELEM(0,%d)"%(j+1,' '.join( 2762 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]),j+1) 2763 for j, so in enumerate(squared_born_so_orders)]) 2764 matrix_element.rep_dict['write_so_born_results'] = '\n'.join( 2765 ['\n'.join([ 2766 "write (69,*) 'Born_SO_Results %s'"%(' '.join( 2767 ['%d'%so_value for so_value in so])), 2768 "write (69,*) 'SO_Born BORN ',MATELEM(0,%d)"%(j+1), 2769 ]) for j, so in enumerate(squared_born_so_orders)]) 2770 2771 # Add a bottom bar to both print_so_[loop|born]_results 2772 matrix_element.rep_dict['print_so_born_results'] += \ 2773 '\nwrite (*,*) "---------------------------------"' 2774 matrix_element.rep_dict['print_so_loop_results'] += \ 2775 '\nwrite (*,*) "---------------------------------"'
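# ------------------------------------------------------------------------------
# Illustration (standalone sketch): the born squared split orders are rebuilt
# above by summing, component by component, every pair of born amplitude order
# vectors (including an amplitude paired with itself), keeping each resulting
# combination only once in order of first appearance. The helper below isolates
# that pairing logic for plain tuples of coupling powers, whereas the real code
# works on the (orders, amplitude_numbers) entries of amps_orders.
def sketch_squared_born_orders(born_amp_orders):
    """Pairwise sums of amplitude order vectors, without duplicates."""
    squared = []
    for i, orders_i in enumerate(born_amp_orders):
        for orders_j in born_amp_orders[:i + 1]:
            key = tuple(o1 + o2 for o1, o2 in zip(orders_i, orders_j))
            if key not in squared:
                squared.append(key)
    return squared

# With amplitudes of orders (QCD,QED)=(2,0) and (0,2), the squared combinations
# are (4,0), (2,2) and (0,4).
assert sketch_squared_born_orders([(2, 0), (0, 2)]) == [(4, 0), (2, 2), (0, 4)]
# ------------------------------------------------------------------------------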
2776
2777 - def write_tir_cache_size_include(self, writer):
2778          """Write the file 'tir_cache_size.inc' which sets the size of the TIR 
2779          cache that the user wishes to employ and the default value for it. 
2780          This can have an impact on MadLoop speed when using stability checks 
2781          but also impacts in a non-negligible way MadLoop's memory footprint. 
2782          It is therefore important that the user can choose its size.""" 
2783   
2784          # For the standalone optimized output, a size of one is necessary. 
2785          # The MadLoop+MadEvent output sets it to 2 because it can gain further 
2786          # speed increase with a TIR cache of size 2 due to the structure of the 
2787          # calls to MadLoop there. 
2788          tir_cache_size = "parameter(TIR_CACHE_SIZE=1)" 
2789          writer.writelines(tir_cache_size)
2790
2791 - def write_loopmatrix(self, writer, matrix_element, fortran_model, \ 2792 write_auxiliary_files=True,):
2793 """Create the loop_matrix.f file.""" 2794 2795 if not matrix_element.get('processes') or \ 2796 not matrix_element.get('diagrams'): 2797 return 0 2798 2799 # Set lowercase/uppercase Fortran code 2800 writers.FortranWriter.downcase = False 2801 2802 # Starting off with the treatment of the split_orders since some 2803 # of the information extracted there will come into the 2804 # general_replace_dict. Split orders are abbreviated SO in all the 2805 # keys of the replacement dictionaries. 2806 2807 # Take care of the split_orders 2808 squared_orders, amps_orders = matrix_element.get_split_orders_mapping() 2809 # Creating here a temporary list containing only the information of 2810 # what are the different squared split orders contributing 2811 # (i.e. not using max_contrib_amp_number and max_contrib_ref_amp_number) 2812 sqso_contribs = [sqso[0] for sqso in squared_orders] 2813 split_orders = matrix_element.get('processes')[0].get('split_orders') 2814 # The entries set in the function below are only for check_sa written 2815 # out in write_loop__matrix_element_v4 (it is however placed here because the 2816 # split order information is only available here). 2817 self.setup_check_sa_replacement_dictionary(matrix_element, 2818 split_orders,sqso_contribs,amps_orders) 2819 2820 # Now recast the split order basis for the loop, born and counterterm 2821 # amplitude into one single splitorderbasis. 2822 overall_so_basis = list(set( 2823 [born_so[0] for born_so in amps_orders['born_amp_orders']]+ 2824 [born_so[0] for born_so in amps_orders['loop_amp_orders']])) 2825 # We must re-sort it to make sure it follows an increasing WEIGHT order 2826 order_hierarchy = matrix_element.get('processes')[0]\ 2827 .get('model').get('order_hierarchy') 2828 if set(order_hierarchy.keys()).union(set(split_orders))==\ 2829 set(order_hierarchy.keys()): 2830 overall_so_basis.sort(key= lambda so: 2831 sum([order_hierarchy[split_orders[i]]*order_power for \ 2832 i, order_power in enumerate(so)])) 2833 2834 # Those are additional entries used throughout the different files of 2835 # MadLoop5 2836 matrix_element.rep_dict['split_order_str_list'] = str(split_orders) 2837 matrix_element.rep_dict['nSO'] = len(split_orders) 2838 matrix_element.rep_dict['nSquaredSO'] = len(sqso_contribs) 2839 matrix_element.rep_dict['nAmpSO'] = len(overall_so_basis) 2840 2841 writers.FortranWriter('nsquaredSO.inc').writelines( 2842 """INTEGER NSQUAREDSO 2843 PARAMETER (NSQUAREDSO=%d)"""%matrix_element.rep_dict['nSquaredSO']) 2844 2845 replace_dict = copy.copy(matrix_element.rep_dict) 2846 # Build the general array mapping the split orders indices to their 2847 # definition 2848 replace_dict['ampsplitorders'] = '\n'.join(self.get_split_orders_lines(\ 2849 overall_so_basis,'AMPSPLITORDERS')) 2850 replace_dict['SquaredSO'] = '\n'.join(self.get_split_orders_lines(\ 2851 sqso_contribs,'SQPLITORDERS')) 2852 2853 # Specify what are the squared split orders selected by the proc def. 2854 replace_dict['chosen_so_configs'] = self.set_chosen_SO_index( 2855 matrix_element.get('processes')[0],sqso_contribs) 2856 2857 # Now we build the different arrays storing the split_orders ID of each 2858 # amp. 
2859 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['loop_amp_orders']) 2860 for SO in amps_orders['loop_amp_orders']: 2861 for amp_number in SO[1]: 2862 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1 2863 2864 replace_dict['loopAmpSO'] = '\n'.join(self.format_integer_list( 2865 ampSO_list,'LOOPAMPORDERS')) 2866 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['born_amp_orders']) 2867 for SO in amps_orders['born_amp_orders']: 2868 for amp_number in SO[1]: 2869 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1 2870 replace_dict['BornAmpSO'] = '\n'.join(self.format_integer_list( 2871 ampSO_list,'BORNAMPORDERS')) 2872 2873 # We then go to the TIR setup 2874 # The first entry is the CutTools, we make sure it is available 2875 looplibs_av=['.TRUE.'] 2876 # one should be careful about the order in the following as it must match 2877 # the ordering in MadLoopParamsCard. 2878 for tir_lib in ['pjfry','iregi','golem','samurai','ninja','collier']: 2879 looplibs_av.append('.TRUE.' if tir_lib in self.all_tir and \ 2880 self.tir_available_dict[tir_lib] else '.FALSE.') 2881 replace_dict['data_looplibs_av']=','.join(looplibs_av) 2882 2883 # Helicity offset convention 2884 # For a given helicity, the attached integer 'i' means 2885 # 'i' in ]-inf;-HELOFFSET[ -> Helicity is equal, up to a sign, 2886 # to helicity number abs(i+HELOFFSET) 2887 # 'i' == -HELOFFSET -> Helicity is analytically zero 2888 # 'i' in ]-HELOFFSET,inf[ -> Helicity is contributing with weight 'i'. 2889 # If it is zero, it is skipped. 2890 # Typically, the hel_offset is 10000 2891 replace_dict['hel_offset'] = 10000 2892 2893 # Extract overall denominator 2894 # Averaging initial state color, spin, and identical FS particles 2895 den_factor_line = self.get_den_factor_line(matrix_element) 2896 replace_dict['den_factor_line'] = den_factor_line 2897 2898 # When the user asks for the polarized matrix element we must 2899 # multiply back by the helicity averaging factor 2900 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 2901 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\ 2902 matrix_element.get_beams_hel_avg_factor() 2903 2904 if write_auxiliary_files: 2905 # Write out the color matrix 2906 (CMNum,CMDenom) = self.get_color_matrix(matrix_element) 2907 CMWriter=open(pjoin('..','MadLoop5_resources', 2908 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w') 2909 for ColorLine in CMNum: 2910 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 2911 CMWriter.close() 2912 CMWriter=open(pjoin('..','MadLoop5_resources', 2913 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w') 2914 for ColorLine in CMDenom: 2915 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 2916 CMWriter.close() 2917 2918 # Write out the helicity configurations 2919 HelConfigs=matrix_element.get_helicity_matrix() 2920 HelConfigWriter=open(pjoin('..','MadLoop5_resources', 2921 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w') 2922 for HelConfig in HelConfigs: 2923 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n') 2924 HelConfigWriter.close() 2925 2926 # Extract helas calls 2927 born_ct_helas_calls, uvct_helas_calls = \ 2928 fortran_model.get_born_ct_helas_calls(matrix_element, 2929 squared_orders=squared_orders,split_orders=split_orders) 2930 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\ 2931 matrix_element,group_loops=self.group_loops, 2932 squared_orders=squared_orders,split_orders=split_orders) 
2933 2934 loop_CT_calls = fortran_model.get_loop_CT_calls(matrix_element,\ 2935 group_loops=self.group_loops, 2936 squared_orders=squared_orders, split_orders=split_orders) 2937 # The proc_prefix must be replaced 2938 coef_construction = [c % matrix_element.rep_dict for c 2939 in coef_construction] 2940 loop_CT_calls = [lc % matrix_element.rep_dict for lc in loop_CT_calls] 2941 2942 file = open(os.path.join(self.template_dir,\ 2943 'loop_matrix_standalone.inc')).read() 2944 2945 # Setup the contextual environment which is used in the splitting 2946 # functions below 2947 context = self.get_context(matrix_element) 2948 file=self.split_HELASCALLS(writer,replace_dict,\ 2949 'helas_calls_split.inc',file,born_ct_helas_calls,\ 2950 'born_ct_helas_calls','helas_calls_ampb', 2951 required_so_broadcaster = 'CT_REQ_SO_DONE', 2952 continue_label = 2000, context = context) 2953 file=self.split_HELASCALLS(writer,replace_dict,\ 2954 'helas_calls_split.inc',file,uvct_helas_calls,\ 2955 'uvct_helas_calls','helas_calls_uvct', 2956 required_so_broadcaster = 'UVCT_REQ_SO_DONE', 2957 continue_label = 3000, context=context) 2958 file=self.split_HELASCALLS(writer,replace_dict,\ 2959 'helas_calls_split.inc',file,coef_construction,\ 2960 'coef_construction','coef_construction', 2961 required_so_broadcaster = 'LOOP_REQ_SO_DONE', 2962 continue_label = 4000, context=context) 2963 file=self.split_HELASCALLS(writer,replace_dict,\ 2964 'helas_calls_split.inc',file,loop_CT_calls,\ 2965 'loop_CT_calls','loop_CT_calls', 2966 required_so_broadcaster = 'CTCALL_REQ_SO_DONE', 2967 continue_label = 5000, context=context) 2968 2969 # Add the entries above to the general_replace_dict so that it can be 2970 # used by write_mp_compute_loop_coefs later 2971 matrix_element.rep_dict['loop_CT_calls']=replace_dict['loop_CT_calls'] 2972 matrix_element.rep_dict['born_ct_helas_calls']=replace_dict['born_ct_helas_calls'] 2973 matrix_element.rep_dict['uvct_helas_calls']=replace_dict['uvct_helas_calls'] 2974 matrix_element.rep_dict['coef_construction']=replace_dict['coef_construction'] 2975 2976 replace_dict['coef_merging']='\n'.join(coef_merging) 2977 2978 file = file % replace_dict 2979 number_of_calls = len(filter(lambda call: call.find('CALL LOOP') != 0, \ 2980 loop_CT_calls)) 2981 if writer: 2982 # Write the file 2983 writer.writelines(file,context=context) 2984 return number_of_calls 2985 else: 2986 # Return it to be written along with the others 2987 return number_of_calls, file
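# Illustrative sketch (not part of the module): how the AMPSPLITORDERS-style
# index arrays assembled above can be built in isolation. The data below is
# made up; in the exporter, 'amps_orders' comes from
# matrix_element.get_split_orders_mapping() and pairs each split-order tuple
# with the numbers of the amplitudes carrying it.

def build_amp_so_indices(amp_orders, so_basis):
    """Return, for every amplitude number (1-based), the 1-based index of its
    split-order combination in the overall basis, mimicking ampSO_list above."""
    n_amps = sum(len(amp_numbers) for _, amp_numbers in amp_orders)
    amp_so = [-1] * n_amps
    for so_tuple, amp_numbers in amp_orders:
        for amp_number in amp_numbers:
            amp_so[amp_number - 1] = so_basis.index(so_tuple) + 1
    return amp_so

# Hypothetical example with two QCD/QED split-order combinations:
# amplitudes 1 and 3 carry (2,0), amplitude 2 carries (0,2).
print(build_amp_so_indices([((2, 0), (1, 3)), ((0, 2), (2,))],
                           [(2, 0), (0, 2)]))   # -> [1, 2, 1]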
2988 2989 #=============================================================================== 2990 # LoopProcessExporterFortranMatchBox 2991 #===============================================================================
2992 -class LoopProcessExporterFortranMatchBox(LoopProcessOptimizedExporterFortranSA, 2993 export_v4.ProcessExporterFortranMatchBox):
2994 """Class to take care of exporting a set of loop matrix elements in the 2995 Fortran format.""" 2996 2997 default_opt = {'clean': False, 'complex_mass':False, 2998 'export_format':'madloop_matchbox', 'mp':True, 2999 'loop_dir':'', 'cuttools_dir':'', 3000 'fortran_compiler':'gfortran', 3001 'output_dependencies':'external', 3002 'sa_symmetry':True} 3003 3004 3005
3006 - def get_color_string_lines(self, matrix_element):
3007 """Return the color matrix definition lines for this matrix element. Split 3008 rows in chunks of size n.""" 3009 3010 return export_v4.ProcessExporterFortranMatchBox.get_color_string_lines(matrix_element)
3011 3012
3013 - def get_JAMP_lines(self, *args, **opts):
3014 """Adding leading color part of the colorflow""" 3015 3016 return export_v4.ProcessExporterFortranMatchBox.get_JAMP_lines(self, *args, **opts)
3017
3018 - def get_ME_identifier(self, matrix_element, group_number = None, group_elem_number = None):
3019 """ To not mix notations between borns and virtuals we call it here also MG5 """ 3020 return 'MG5_%d_'%matrix_element.get('processes')[0].get('id')
3021 3022 3023 #=============================================================================== 3024 # LoopInducedExporter 3025 #===============================================================================
3026 -class LoopInducedExporterME(LoopProcessOptimizedExporterFortranSA):
3027 """ A class to specify all the functions common to LoopInducedExporterMEGroup 3028 and LoopInducedExporterMENoGroup (but not relevant for the original 3029 Madevent exporters)""" 3030 3031 madloop_makefile_name = 'makefile_MadLoop' 3032 3033
3034 - def __init__(self, *args, **opts):
3035 """ Initialize the process, setting the proc characteristics.""" 3036 super(LoopInducedExporterME, self).__init__(*args, **opts) 3037 self.proc_characteristic['loop_induced'] = True
3038
3039 - def get_context(self,*args,**opts):
3040 """ Make sure that the contextual variable MadEventOutput is set to 3041 True for this exporter""" 3042 3043 context = super(LoopInducedExporterME,self).get_context(*args,**opts) 3044 context['MadEventOutput'] = True 3045 return context
3046 3047 #=========================================================================== 3048 # write a procdef_mg5 (an equivalent of the MG4 proc_card.dat) 3049 #===========================================================================
3050 - def write_procdef_mg5(self, file_pos, modelname, process_str):
3051 """ write an equivalent of the MG4 proc_card in order that all the Madevent 3052 Perl script of MadEvent4 are still working properly for pure MG5 run. 3053 Not needed for StandAlone so we need to call the correct one 3054 """ 3055 3056 return export_v4.ProcessExporterFortranMEGroup.write_procdef_mg5( 3057 self, file_pos, modelname, process_str)
3058
3059 - def get_source_libraries_list(self):
3060 """ Returns the list of libraries to be compiling when compiling the 3061 SOURCE directory. It is different for loop_induced processes and 3062 also depends on the value of the 'output_dependencies' option""" 3063 3064 libraries_list = super(LoopInducedExporterME,self).\ 3065 get_source_libraries_list() 3066 3067 if self.dependencies=='internal': 3068 libraries_list.append('$(LIBDIR)libcts.$(libext)') 3069 libraries_list.append('$(LIBDIR)libiregi.$(libext)') 3070 3071 return libraries_list
3072 3079
3080 - def copy_template(self, *args, **opts):
3081 """Pick the right mother functions 3082 """ 3083 # Call specifically the necessary building functions for the mixed 3084 # template setup for both MadEvent and MadLoop standalone 3085 LoopProcessExporterFortranSA.loop_additional_template_setup(self, 3086 copy_Source_makefile=False) 3087 3088 LoopProcessOptimizedExporterFortranSA.\ 3089 loop_optimized_additional_template_setup(self)
3090 3091 3092 #=========================================================================== 3093 # Create jpeg diagrams, html pages,proc_card_mg5.dat and madevent.tar.gz 3094 #===========================================================================
3095 - def finalize(self, matrix_elements, history, mg5options, flaglist):
3096 """Function to finalize v4 directory, for inheritance. 3097 """ 3098 3099 self.proc_characteristic['loop_induced'] = True 3100 3101 # This can be uncommented if one desires to have the MadLoop 3102 # initialization performed at the end of the output phase. 3103 # Alternatively, one can simply execute the command 'initMadLoop' in 3104 # the madevent interactive interface after the output. 3105 # from madgraph.interface.madevent_interface import MadLoopInitializer 3106 # MadLoopInitializer.init_MadLoop(self.dir_path, 3107 # subproc_prefix=self.SubProc_prefix, MG_options=None) 3108 3109 self.write_global_specs(matrix_elements)
3110
3111 - def write_tir_cache_size_include(self, writer):
3112 """Write the file 'tir_cache_size.inc' which sets the size of the TIR 3113 cache the the user wishes to employ and the default value for it. 3114 This can have an impact on MadLoop speed when using stability checks 3115 but also impacts in a non-negligible way MadLoop's memory footprint. 3116 It is therefore important that the user can chose its size.""" 3117 3118 # In this case of MadLoop+MadEvent output, we set it to 2 because we 3119 # gain further speed increase with a TIR cache of size 2 due to the 3120 # the fact that we call MadLoop once per helicity configuration in this 3121 # case. 3122 tir_cach_size = "parameter(TIR_CACHE_SIZE=2)" 3123 writer.writelines(tir_cach_size)
3124
3125 - def write_matrix_element_v4(self, writer, matrix_element, fortran_model, 3126 proc_id = None, config_map = [], subproc_number = None):
3127 """ Write it the wrapper to call the ML5 subroutine in the library.""" 3128 3129 # Generating the MadEvent wrapping ME's routines 3130 if not matrix_element.get('processes') or \ 3131 not matrix_element.get('diagrams'): 3132 return 0 3133 3134 if not isinstance(writer, writers.FortranWriter): 3135 raise writers.FortranWriter.FortranWriterError(\ 3136 "writer not FortranWriter") 3137 3138 replace_dict = copy.copy(matrix_element.rep_dict) 3139 3140 # Extract version number and date from VERSION file 3141 info_lines = self.get_mg5_info_lines() 3142 replace_dict['info_lines'] = info_lines 3143 3144 # Extract process info lines 3145 process_lines = self.get_process_info_lines(matrix_element) 3146 replace_dict['process_lines'] = process_lines 3147 3148 # Set proc_id 3149 # It can be set to None when write_matrix_element_v4 is called without 3150 # grouping. In this case the subroutine SMATRIX should take an empty 3151 # suffix. 3152 if proc_id is None: 3153 replace_dict['proc_id'] = '' 3154 else: 3155 replace_dict['proc_id'] = proc_id 3156 3157 #set the average over the number of initial helicities 3158 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 3159 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\ 3160 matrix_element.get_beams_hel_avg_factor() 3161 3162 # Extract helicity lines 3163 helicity_lines = self.get_helicity_lines(matrix_element) 3164 replace_dict['helicity_lines'] = helicity_lines 3165 3166 3167 # Extract ndiags 3168 ndiags = len(matrix_element.get('diagrams')) 3169 replace_dict['ndiags'] = ndiags 3170 3171 # Set define_iconfigs_lines 3172 replace_dict['define_iconfigs_lines'] = \ 3173 """INTEGER MAPCONFIG(0:LMAXCONFIGS), ICONFIG 3174 COMMON/TO_MCONFIGS/MAPCONFIG, ICONFIG""" 3175 3176 if proc_id: 3177 # Set lines for subprocess group version 3178 # Set define_iconfigs_lines 3179 replace_dict['define_iconfigs_lines'] += \ 3180 """\nINTEGER SUBDIAG(MAXSPROC),IB(2) 3181 COMMON/TO_SUB_DIAG/SUBDIAG,IB""" 3182 # Set set_amp2_line 3183 replace_dict['configID_in_matrix'] = "SUBDIAG(%s)"%proc_id 3184 else: 3185 # Standard running 3186 # Set set_amp2_line 3187 replace_dict['configID_in_matrix'] = "MAPCONFIG(ICONFIG)" 3188 3189 # If group_numer 3190 replace_dict['ml_prefix'] = \ 3191 self.get_ME_identifier(matrix_element, subproc_number, proc_id) 3192 3193 # Extract ncolor 3194 ncolor = max(1, len(matrix_element.get('color_basis'))) 3195 replace_dict['ncolor'] = ncolor 3196 3197 n_tot_diags = len(matrix_element.get_loop_diagrams()) 3198 replace_dict['n_tot_diags'] = n_tot_diags 3199 3200 file = open(pjoin(_file_path, \ 3201 'iolibs/template_files/%s' % self.matrix_file)).read() 3202 file = file % replace_dict 3203 3204 # Write the file 3205 writer.writelines(file) 3206 3207 return 0, ncolor
3208
3209 - def get_amp2_lines(self, *args, **opts):
3210 """Make sure the function is implemented in the daughters""" 3211 3212 raise NotImplemented, 'The function get_amp2_lines must be called in '+\ 3213 ' the daugthers of LoopInducedExporterME'
3214 3215 #=============================================================================== 3216 # LoopInducedExporterMEGroup 3217 #===============================================================================
3218 -class LoopInducedExporterMEGroup(LoopInducedExporterME, 3219 export_v4.ProcessExporterFortranMEGroup):
3220 """Class to take care of exporting a set of grouped loop induced matrix 3221 elements""" 3222 3223 matrix_file = "matrix_loop_induced_madevent_group.inc" 3224 3230
3231 - def write_source_makefile(self, *args, **opts):
3232 """Pick the correct write_source_makefile function from 3233 ProcessExporterFortranMEGroup""" 3234 3235 export_v4.ProcessExporterFortranMEGroup.write_source_makefile(self, 3236 *args, **opts)
3237
3238 - def copy_template(self, *args, **opts):
3239 """Pick the right mother functions 3240 """ 3241 # Call specifically the necessary building functions for the mixed 3242 # template setup for both MadEvent and MadLoop standalone 3243 3244 # Start witht the MadEvent one 3245 export_v4.ProcessExporterFortranMEGroup.copy_template(self,*args,**opts) 3246 3247 # Then the MadLoop-standalone related one 3248 LoopInducedExporterME.copy_template(self, *args, **opts)
3249
3250 - def finalize(self, *args, **opts):
3251 """Pick the right mother functions 3252 """ 3253 # Call specifically what finalize_v4_directory must be used, so that the 3254 # MRO doesn't interfere. 3255 3256 self.proc_characteristic['loop_induced'] = True 3257 3258 export_v4.ProcessExporterFortranMEGroup.finalize(self,*args,**opts) 3259 3260 # And the finilize from LoopInducedExporterME which essentially takes 3261 # care of MadLoop virtuals initialization 3262 LoopInducedExporterME.finalize(self,*args,**opts)
3263
3264 - def generate_subprocess_directory(self, subproc_group, 3265 fortran_model,group_number):
3266 """Generate the Pn directory for a subprocess group in MadEvent, 3267 including the necessary matrix_N.f files, configs.inc and various 3268 other helper files""" 3269 3270 # Generate the MadLoop files 3271 calls = 0 3272 matrix_elements = subproc_group.get('matrix_elements') 3273 for ime, matrix_element in enumerate(matrix_elements): 3274 self.unique_id +=1 3275 calls += self.generate_loop_subprocess(matrix_element,fortran_model, 3276 group_number = group_number, proc_id = str(ime+1), 3277 # group_number = str(subproc_group.get('number')), proc_id = str(ime+1), 3278 config_map = subproc_group.get('diagram_maps')[ime], 3279 unique_id=self.unique_id) 3280 3281 # Then generate the MadEvent files 3282 export_v4.ProcessExporterFortranMEGroup.generate_subprocess_directory( 3283 self, subproc_group,fortran_model,group_number) 3284 3285 return calls
3286
3287 - def get_amp2_lines(self, matrix_element, replace_dict, config_map):
3288 """Return the various replacement dictionary inputs necessary for the 3289 multichanneling amp2 definition for the loop-induced MadEvent output. 3290 """ 3291 3292 if not config_map: 3293 raise MadGraph5Error, 'A multi-channeling configuration map is '+\ 3294 ' necessary for the MadEvent Loop-induced output with grouping.' 3295 3296 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 3297 3298 ret_lines = [] 3299 # In this case, we need to sum up all amplitudes that have 3300 # identical topologies, as given by the config_map (which 3301 # gives the topology/config for each of the diagrams 3302 if isinstance(matrix_element, loop_helas_objects.LoopHelasMatrixElement): 3303 diagrams = matrix_element.get_loop_diagrams() 3304 else: 3305 diagrams = matrix_element.get('diagrams') 3306 3307 # Note that we need to use AMP2 number corresponding to the first 3308 # diagram number used for that AMP2. 3309 # The dictionary below maps the config ID to this corresponding first 3310 # diagram number 3311 config_index_map = {} 3312 # For each diagram number, the dictionary below gives the config_id it 3313 # belongs to or 0 if it doesn't belong to any. 3314 loop_amp_ID_to_config = {} 3315 3316 # Combine the diagrams with identical topologies 3317 config_to_diag_dict = {} 3318 for idiag, diag in enumerate(diagrams): 3319 try: 3320 config_to_diag_dict[config_map[idiag]].append(idiag) 3321 except KeyError: 3322 config_to_diag_dict[config_map[idiag]] = [idiag] 3323 3324 for config in sorted(config_to_diag_dict.keys()): 3325 config_index_map[config] = (config_to_diag_dict[config][0] + 1) 3326 3327 # First add the UV and R2 counterterm amplitudes of each selected 3328 # diagram for the multichannel config 3329 CT_amp_numbers = [a.get('number') for a in \ 3330 sum([diagrams[idiag].get_ct_amplitudes() for \ 3331 idiag in config_to_diag_dict[config]], [])] 3332 3333 for CT_amp_number in CT_amp_numbers: 3334 loop_amp_ID_to_config[CT_amp_number] = config 3335 3336 # Now add here the loop amplitudes. 3337 loop_amp_numbers = [a.get('amplitudes')[0].get('number') 3338 for a in sum([diagrams[idiag].get_loop_amplitudes() for \ 3339 idiag in config_to_diag_dict[config]], [])] 3340 3341 for loop_amp_number in loop_amp_numbers: 3342 loop_amp_ID_to_config[loop_amp_number] = config 3343 3344 # Notice that the config_id's are not necessarily sequential here, so 3345 # the size of the config_index_map array has to be the maximum over all 3346 # config_ids. 3347 # config_index_map should never be empty unless there was no diagram, 3348 # so the expression below is ok. 3349 n_configs = max(config_index_map.keys()) 3350 replace_dict['nmultichannel_configs'] = n_configs 3351 3352 # We must fill the empty entries of the map with the dummy amplitude 3353 # number 0. 3354 conf_list = [(config_index_map[i] if i in config_index_map else 0) \ 3355 for i in range(1,n_configs+1)] 3356 # Now the placeholder 'nmultichannels' refers to the number of 3357 # multi-channels which are contributing, so we must filter out zeros. 
3358 replace_dict['nmultichannels'] = len([_ for _ in conf_list if _!=0]) 3359 3360 # Now write the amp2 related inputs in the replacement dictionary 3361 res_list = [] 3362 chunk_size = 6 3363 for k in xrange(0, len(conf_list), chunk_size): 3364 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \ 3365 (k + 1, min(k + chunk_size, len(conf_list)), 3366 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]]))) 3367 3368 replace_dict['config_index_map_definition'] = '\n'.join(res_list) 3369 3370 res_list = [] 3371 n_loop_amps = max(loop_amp_ID_to_config.keys()) 3372 amp_list = [loop_amp_ID_to_config[i] for i in \ 3373 sorted(loop_amp_ID_to_config.keys()) if i!=0] 3374 chunk_size = 6 3375 for k in xrange(0, len(amp_list), chunk_size): 3376 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \ 3377 (k + 1, min(k + chunk_size, len(amp_list)), 3378 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]]))) 3379 3380 replace_dict['config_map_definition'] = '\n'.join(res_list) 3381 3382 return
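# Illustrative sketch (not part of the module): the DATA statements built above
# are emitted in chunks of 6 values so that the generated Fortran lines stay
# within a reasonable length. The array name and values below are made up.

def fortran_data_lines(array_name, values, chunk_size=6):
    lines = []
    for k in range(0, len(values), chunk_size):
        chunk = values[k:k + chunk_size]
        lines.append("DATA (%s(i),i=%6r,%6r) /%s/" % (
            array_name, k + 1, k + len(chunk),
            ','.join(["%6r" % v for v in chunk])))
    return lines

print('\n'.join(fortran_data_lines('CONFIG_MAP', list(range(1, 14)))))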
3383 3384 #=============================================================================== 3385 # LoopInducedExporterMENoGroup 3386 #===============================================================================
3387 -class LoopInducedExporterMENoGroup(LoopInducedExporterME, 3388 export_v4.ProcessExporterFortranME):
3389 """Class to take care of exporting a set of individual loop induced matrix 3390 elements""" 3391 3392 matrix_file = "matrix_loop_induced_madevent.inc" 3393 3399
3400 - def write_source_makefile(self, *args, **opts):
3401 """Pick the correct write_source_makefile function from 3402 ProcessExporterFortran""" 3403 3404 super(export_v4.ProcessExporterFortranME,self).\ 3405 write_source_makefile(*args, **opts)
3406
3407 - def copy_template(self, *args, **opts):
3408 """Pick the right mother functions 3409 """ 3410 # Call specifically the necessary building functions for the mixed 3411 # template setup for both MadEvent and MadLoop standalone 3412 3413 # Start witht the MadEvent one 3414 export_v4.ProcessExporterFortranME.copy_template(self,*args,**opts) 3415 3416 # Then the MadLoop-standalone related one 3417 LoopInducedExporterME.copy_template(self, *args, **opts)
3418
3419 - def finalize(self, *args, **opts):
3420 """Pick the right mother functions 3421 """ 3422 3423 self.proc_characteristic['loop_induced'] = True 3424 # Call specifically what finalize must be used, so that the 3425 # MRO doesn't interfere. 3426 export_v4.ProcessExporterFortranME.finalize(self, *args, **opts) 3427 3428 # And the finilize_v4 from LoopInducedExporterME which essentially takes 3429 # care of MadLoop virtuals initialization 3430 LoopInducedExporterME.finalize(self, *args, **opts)
3431
3432 - def generate_subprocess_directory(self, matrix_element, fortran_model, me_number):
3433 """Generate the Pn directory for a subprocess group in MadEvent, 3434 including the necessary matrix_N.f files, configs.inc and various 3435 other helper files""" 3436 3437 self.unique_id += 1 3438 # Then generate the MadLoop files 3439 calls = self.generate_loop_subprocess(matrix_element,fortran_model, 3440 group_number = me_number, 3441 unique_id=self.unique_id) 3442 3443 3444 # First generate the MadEvent files 3445 calls += export_v4.ProcessExporterFortranME.generate_subprocess_directory( 3446 self, matrix_element, fortran_model, me_number) 3447 return calls
3448
3449 - def get_amp2_lines(self, matrix_element, replace_dict, config_map):
3450 """Return the amp2(i) = sum(amp for diag(i))^2 lines""" 3451 3452 if config_map: 3453 raise MadGraph5Error, 'A configuration map should not be specified'+\ 3454 ' for the Loop induced exporter without grouping.' 3455 3456 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 3457 # Get minimum legs in a vertex 3458 vert_list = [max(diag.get_vertex_leg_numbers()) for diag in \ 3459 matrix_element.get('diagrams') if diag.get_vertex_leg_numbers()!=[]] 3460 minvert = min(vert_list) if vert_list!=[] else 0 3461 3462 # Note that we need to use AMP2 number corresponding to the first 3463 # diagram number used for that AMP2. 3464 # The dictionary below maps the config ID to this corresponding first 3465 # diagram number 3466 config_index_map = {} 3467 # For each diagram number, the dictionary below gives the config_id it 3468 # belongs to or 0 if it doesn't belong to any. 3469 loop_amp_ID_to_config = {} 3470 3471 n_configs = 0 3472 for idiag, diag in enumerate(matrix_element.get('diagrams')): 3473 # Ignore any diagrams with 4-particle vertices. 3474 use_for_multichanneling = True 3475 if diag.get_vertex_leg_numbers()!=[] and max(diag.get_vertex_leg_numbers()) > minvert: 3476 use_for_multichanneling = False 3477 curr_config = 0 3478 else: 3479 n_configs += 1 3480 curr_config = n_configs 3481 3482 if not use_for_multichanneling: 3483 if 0 not in config_index_map: 3484 config_index_map[0] = idiag + 1 3485 else: 3486 config_index_map[curr_config] = idiag + 1 3487 3488 CT_amps = [ a.get('number') for a in diag.get_ct_amplitudes()] 3489 for CT_amp in CT_amps: 3490 loop_amp_ID_to_config[CT_amp] = curr_config 3491 3492 Loop_amps = [a.get('amplitudes')[0].get('number') 3493 for a in diag.get_loop_amplitudes()] 3494 for Loop_amp in Loop_amps: 3495 loop_amp_ID_to_config[Loop_amp] = curr_config 3496 3497 # Now write the amp2 related inputs in the replacement dictionary 3498 n_configs = len([k for k in config_index_map.keys() if k!=0]) 3499 replace_dict['nmultichannel_configs'] = n_configs 3500 # Now the placeholder 'nmultichannels' refers to the number of 3501 # multi-channels which are contributing which, in the non-grouped case 3502 # is always equal to the total number of multi-channels. 3503 replace_dict['nmultichannels'] = n_configs 3504 3505 res_list = [] 3506 conf_list = [config_index_map[i] for i in sorted(config_index_map.keys()) 3507 if i!=0] 3508 chunk_size = 6 3509 for k in xrange(0, len(conf_list), chunk_size): 3510 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \ 3511 (k + 1, min(k + chunk_size, len(conf_list)), 3512 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]]))) 3513 3514 replace_dict['config_index_map_definition'] = '\n'.join(res_list) 3515 3516 res_list = [] 3517 n_loop_amps = max(loop_amp_ID_to_config.keys()) 3518 amp_list = [loop_amp_ID_to_config[i] for i in \ 3519 sorted(loop_amp_ID_to_config.keys()) if i!=0] 3520 chunk_size = 6 3521 for k in xrange(0, len(amp_list), chunk_size): 3522 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \ 3523 (k + 1, min(k + chunk_size, len(amp_list)), 3524 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]]))) 3525 3526 replace_dict['config_map_definition'] = '\n'.join(res_list)
3527