
Source Code for Module madgraph.loop.loop_exporters

   1  ################################################################################ 
   2  # 
   3  # Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors 
   4  # 
   5  # This file is a part of the MadGraph5_aMC@NLO project, an application which  
   6  # automatically generates Feynman diagrams and matrix elements for arbitrary 
   7  # high-energy processes in the Standard Model and beyond. 
   8  # 
   9  # It is subject to the MadGraph5_aMC@NLO license which should accompany this  
  10  # distribution. 
  11  # 
  12  # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch 
  13  # 
  14  ################################################################################ 
  15  """Methods and classes to export matrix elements to v4 format.""" 
  16   
  17  import copy 
  18  import fractions 
  19  import glob 
  20  import logging 
  21  import os 
  22  import stat 
  23  import sys 
  24  import re 
  25  import shutil 
  26  import subprocess 
  27  import itertools 
  28  import time 
  29  import datetime 
  30   
  31   
  32  import aloha 
  33   
  34  import madgraph.core.base_objects as base_objects 
  35  import madgraph.core.color_algebra as color 
  36  import madgraph.core.helas_objects as helas_objects 
  37  import madgraph.loop.loop_helas_objects as loop_helas_objects 
  38  import madgraph.iolibs.drawing_eps as draw 
  39  import madgraph.iolibs.files as files 
  40  import madgraph.iolibs.group_subprocs as group_subprocs 
  41  import madgraph.various.banner as banner_mod 
  42  import madgraph.various.misc as misc 
  43  import madgraph.various.q_polynomial as q_polynomial 
  44  import madgraph.iolibs.file_writers as writers 
  45  import madgraph.iolibs.gen_infohtml as gen_infohtml 
  46  import madgraph.iolibs.template_files as template_files 
  47  import madgraph.iolibs.ufo_expression_parsers as parsers 
  48  import madgraph.iolibs.export_v4 as export_v4 
  49  import madgraph.various.diagram_symmetry as diagram_symmetry 
  50  import madgraph.various.process_checks as process_checks 
  51  import madgraph.various.progressbar as pbar 
  52  import madgraph.various.q_polynomial as q_polynomial 
  53  import madgraph.core.color_amp as color_amp 
  54  import madgraph.iolibs.helas_call_writers as helas_call_writers 
  55  import models.check_param_card as check_param_card 
  56  from madgraph.loop.loop_base_objects import LoopDiagram 
  57  from madgraph.loop.MadLoopBannerStyles import MadLoopBannerStyles 
  58   
  59   
  60   
  61  pjoin = os.path.join 
  62   
  63  import aloha.create_aloha as create_aloha 
  64  import models.write_param_card as param_writer 
  65  from madgraph import MadGraph5Error, MG5DIR, InvalidCmd 
  66  from madgraph.iolibs.files import cp, ln, mv 
  67  pjoin = os.path.join 
  68  _file_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] + '/' 
  69  logger = logging.getLogger('madgraph.loop_exporter') 
  70   
  71  #=============================================================================== 
  72  # LoopExporterFortran 
  73  #=============================================================================== 
74 -class LoopExporterFortran(object):
75 """ Class to define general helper functions to the different 76 loop fortran exporters (ME, SA, MEGroup, etc..) which will inherit both 77 from this class AND from the corresponding ProcessExporterFortran(ME,SA,...). 78 It plays the same role as ProcessExporterFrotran and simply defines here 79 loop-specific helpers functions necessary for all loop exporters. 80 Notice that we do not have LoopExporterFortran inheriting from 81 ProcessExporterFortran but give access to arguments like dir_path and 82 clean using options. This avoids method resolution object ambiguity""" 83 84 default_opt = dict(export_v4.ProcessExporterFortran.default_opt) 85 default_opt.update({'clean': False, 'complex_mass':False, 86 'export_format':'madloop', 'mp':True, 87 'loop_dir':'', 'cuttools_dir':'', 88 'fortran_compiler':'gfortran', 89 'SubProc_prefix': 'P', 90 'output_dependencies': 'external', 91 'compute_color_flows': False, 92 'mode':''}) 93 94 include_names = {'ninja' : 'mninja.mod', 95 'golem' : 'generic_function_1p.mod', 96 'samurai':'msamurai.mod', 97 'collier': 'collier.mod'} 98
99 - def __init__(self, dir_path = "", opt=None):
100 """Initiate the LoopExporterFortran with directory information on where 101 to find all the loop-related source files, like CutTools""" 102 103 104 self.opt = dict(self.default_opt) 105 if opt: 106 self.opt.update(opt) 107 108 self.SubProc_prefix = self.opt['SubProc_prefix'] 109 self.loop_dir = self.opt['loop_dir'] 110 self.cuttools_dir = self.opt['cuttools_dir'] 111 self.fortran_compiler = self.opt['fortran_compiler'] 112 self.dependencies = self.opt['output_dependencies'] 113 self.compute_color_flows = self.opt['compute_color_flows'] 114 115 super(LoopExporterFortran,self).__init__(dir_path, self.opt)
116 117 191
192 - def get_aloha_model(self, model):
193 """ Caches the aloha model created here as an attribute of the loop 194 exporter so that it can later be used in the LoopHelasMatrixElement 195 in the function compute_all_analytic_information for recycling aloha 196 computations across different LoopHelasMatrixElements steered by the 197 same loop exporter. 198 """ 199 if not hasattr(self, 'aloha_model'): 200 self.aloha_model = create_aloha.AbstractALOHAModel(os.path.basename(model.get('modelpath'))) 201 return self.aloha_model
202 203 #=========================================================================== 204 # write the multiple-precision header files 205 #===========================================================================
206 - def write_mp_files(self, writer_mprec, writer_mpc):
207 """Write the cts_mprec.h and cts_mpc.h""" 208 209 file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mprec.h')).read() 210 writer_mprec.writelines(file) 211 212 file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mpc.h')).read() 213 file = file.replace('&','') 214 writer_mpc.writelines(file) 215 216 return True
217 218 #=============================================================================== 219 # LoopProcessExporterFortranSA 220 #===============================================================================
221 -class LoopProcessExporterFortranSA(LoopExporterFortran, 222 export_v4.ProcessExporterFortranSA):
223 224 """Class to take care of exporting a set of loop matrix elements in the 225 Fortran format.""" 226 227 template_dir=os.path.join(_file_path,'iolibs/template_files/loop') 228 madloop_makefile_name = 'makefile' 229 230 MadLoop_banner = MadLoopBannerStyles.get_MadLoop_Banner( 231 style='classic2', color='green', 232 top_frame_char = '=', bottom_frame_char = '=', 233 left_frame_char = '{',right_frame_char = '}', 234 print_frame=True, side_margin = 7, up_margin = 1) 235
236 - def __init__(self, *args, **opts):
237 super(LoopProcessExporterFortranSA,self).__init__(*args,**opts) 238 self.unique_id=0 # to allow collier to distinguish the various loop subprocesses 239 self.has_loop_induced = False
240
241 - def copy_template(self, model):
242 """Additional actions needed to setup the Template. 243 """ 244 super(LoopProcessExporterFortranSA, self).copy_template(model) 245 246 self.loop_additional_template_setup()
247
248 - def finalize(self, matrix_element, cmdhistory, MG5options, outputflag):
249 """create the global information for loops""" 250 251 super(LoopProcessExporterFortranSA,self).finalize(matrix_element, 252 cmdhistory, MG5options, outputflag) 253 254 255 MLCard = banner_mod.MadLoopParam(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat')) 256 # For loop-induced processes and *only* when summing over all helicity configurations 257 # (which is the default for standalone usage), COLLIER is faster than Ninja. 258 if self.has_loop_induced: 259 MLCard['MLReductionLib'] = "7|6|1" 260 # Computing the poles with COLLIER also unnecessarily slows down the code 261 # It should only be set to True for checks and it's acceptable to remove them 262 # here because for loop-induced processes they should be zero anyway. 263 # We keep it active for non-loop induced processes because COLLIER is not the 264 # main reduction tool in that case, and the poles wouldn't be zero then 265 MLCard['COLLIERComputeUVpoles'] = False 266 MLCard['COLLIERComputeIRpoles'] = False 267 268 MLCard.write(pjoin(self.dir_path, 'Cards', 'MadLoopParams_default.dat')) 269 MLCard.write(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat'))
270
271 - def write_f2py_makefile(self):
272 return
273
274 - def write_f2py_check_sa(self, matrix_element, output_path):
275 """ Write the general check_sa.py in SubProcesses that calls all processes successively.""" 276 277 # No need to further edit this file for now. 278 file = open(os.path.join(self.template_dir,\ 279 'check_sa_all.py.inc')).read() 280 open(output_path,'w').writelines(file) 281 # Make it executable 282 os.chmod(output_path, os.stat(output_path).st_mode | stat.S_IEXEC)
283 284
285 - def write_f2py_splitter(self):
286 """write a function to call the correct matrix element""" 287 288 template = """ 289 %(python_information)s 290 291 SUBROUTINE INITIALISE(PATH) 292 C ROUTINE FOR F2PY to read the benchmark point. 293 IMPLICIT NONE 294 CHARACTER*512 PATH 295 CF2PY INTENT(IN) :: PATH 296 CALL SETPARA(PATH) !first call to setup the paramaters 297 RETURN 298 END 299 300 SUBROUTINE SET_MADLOOP_PATH(PATH) 301 C Routine to set the path of the folder 'MadLoop5_resources' to MadLoop 302 CHARACTER(512) PATH 303 CF2PY intent(in)::path 304 CALL SETMADLOOPPATH(PATH) 305 END 306 307 subroutine smatrixhel(pdgs, npdg, p, ALPHAS, SCALES2, nhel, ANS, RETURNCODE) 308 IMPLICIT NONE 309 310 CF2PY double precision, intent(in), dimension(0:3,npdg) :: p 311 CF2PY integer, intent(in), dimension(npdg) :: pdgs 312 CF2PY integer, intent(in) :: npdg 313 CF2PY double precision, intent(out) :: ANS 314 CF2PY integer, intent(out) :: RETURNCODE 315 CF2PY double precision, intent(in) :: ALPHAS 316 CF2PY double precision, intent(in) :: SCALES2 317 318 integer pdgs(*) 319 integer npdg, nhel, RETURNCODE 320 double precision p(*) 321 double precision ANS, ALPHAS, PI,SCALES2 322 323 %(smatrixhel)s 324 325 return 326 end 327 328 subroutine get_pdg_order(OUT) 329 IMPLICIT NONE 330 CF2PY INTEGER, intent(out) :: OUT(%(nb_me)i,%(maxpart)i) 331 332 INTEGER OUT(%(nb_me)i,%(maxpart)i), PDGS(%(nb_me)i,%(maxpart)i) 333 DATA PDGS/ %(pdgs)s / 334 OUT=PDGS 335 RETURN 336 END 337 338 subroutine get_prefix(PREFIX) 339 IMPLICIT NONE 340 CF2PY CHARACTER*20, intent(out) :: PREFIX(%(nb_me)i) 341 character*20 PREFIX(%(nb_me)i),PREF(%(nb_me)i) 342 DATA PREF / '%(prefix)s'/ 343 PREFIX = PREF 344 RETURN 345 END 346 347 """ 348 349 allids = self.prefix_info.keys() 350 allprefix = [self.prefix_info[key][0] for key in allids] 351 min_nexternal = min([len(ids) for ids in allids]) 352 max_nexternal = max([len(ids) for ids in allids]) 353 354 info = [] 355 for key, (prefix, tag) in self.prefix_info.items(): 356 info.append('#PY %s : %s # %s' % (tag, key, prefix)) 357 358 359 text = [] 360 for n_ext in range(min_nexternal, max_nexternal+1): 361 current = [ids for ids in allids if len(ids)==n_ext] 362 if not current: 363 continue 364 if min_nexternal != max_nexternal: 365 if n_ext == min_nexternal: 366 text.append(' if (npdg.eq.%i)then' % n_ext) 367 else: 368 text.append(' else if (npdg.eq.%i)then' % n_ext) 369 for ii,pdgs in enumerate(current): 370 condition = '.and.'.join(['%i.eq.pdgs(%i)' %(pdg, i+1) for i, pdg in enumerate(pdgs)]) 371 if ii==0: 372 text.append( ' if(%s) then ! %i' % (condition, i)) 373 else: 374 text.append( ' else if(%s) then ! %i' % (condition,i)) 375 text.append(' call %sget_me(p, ALPHAS, DSQRT(SCALES2), NHEL, ANS, RETURNCODE)' % self.prefix_info[pdgs][0]) 376 text.append(' endif') 377 #close the function 378 if min_nexternal != max_nexternal: 379 text.append('endif') 380 381 formatting = {'python_information':'\n'.join(info), 382 'smatrixhel': '\n'.join(text), 383 'maxpart': max_nexternal, 384 'nb_me': len(allids), 385 'pdgs': ','.join([str(pdg[i]) if i<len(pdg) else '0' 386 for i in range(max_nexternal) \ 387 for pdg in allids]), 388 'prefix':'\',\''.join(allprefix) 389 } 390 391 392 text = template % formatting 393 fsock = writers.FortranWriter(pjoin(self.dir_path, 'SubProcesses', 'all_matrix.f'),'w') 394 fsock.writelines(text) 395 fsock.close()
396 397 398
399 - def loop_additional_template_setup(self, copy_Source_makefile = True):
400 """ Perform additional actions specific for this class when setting 401 up the template with the copy_template function.""" 402 403 # We must change some files to their version for NLO computations 404 cpfiles= ["Cards/MadLoopParams.dat", 405 "SubProcesses/MadLoopParamReader.f", 406 "SubProcesses/MadLoopParams.inc"] 407 if copy_Source_makefile: 408 cpfiles.append("Source/makefile") 409 410 for file in cpfiles: 411 shutil.copy(os.path.join(self.loop_dir,'StandAlone/', file), 412 os.path.join(self.dir_path, file)) 413 414 cp(pjoin(self.loop_dir,'StandAlone/Cards/MadLoopParams.dat'), 415 pjoin(self.dir_path, 'Cards/MadLoopParams_default.dat')) 416 417 ln(pjoin(self.dir_path, 'Cards','MadLoopParams.dat'), pjoin(self.dir_path,'SubProcesses')) 418 419 # We might need to give a different name to the MadLoop makefile 420 shutil.copy(pjoin(self.loop_dir,'StandAlone','SubProcesses','makefile'), 421 pjoin(self.dir_path, 'SubProcesses',self.madloop_makefile_name)) 422 423 # Write SubProcesses/MadLoop_makefile_definitions with dummy variables 424 # for the non-optimized output 425 link_tir_libs=[] 426 tir_libs=[] 427 428 filePath = pjoin(self.dir_path, 'SubProcesses', 429 'MadLoop_makefile_definitions') 430 calls = self.write_loop_makefile_definitions( 431 writers.MakefileWriter(filePath),link_tir_libs,tir_libs) 432 433 # We need minimal editing of MadLoopCommons.f 434 # For the optimized output, this file will be overwritten once the 435 # availability of COLLIER has been determined. 436 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone', 437 "SubProcesses","MadLoopCommons.inc")).read() 438 writer = writers.FortranWriter(os.path.join(self.dir_path, 439 "SubProcesses","MadLoopCommons.f")) 440 writer.writelines(MadLoopCommon%{ 441 'print_banner_commands':self.MadLoop_banner}, context={ 442 'collier_available':False}) 443 writer.close() 444 445 # Copy the whole MadLoop5_resources directory (empty at this stage) 446 if not os.path.exists(pjoin(self.dir_path,'SubProcesses', 447 'MadLoop5_resources')): 448 cp(pjoin(self.loop_dir,'StandAlone','SubProcesses', 449 'MadLoop5_resources'),pjoin(self.dir_path,'SubProcesses')) 450 451 # Link relevant cards from Cards inside the MadLoop5_resources 452 ln(pjoin(self.dir_path,'SubProcesses','MadLoopParams.dat'), 453 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 454 ln(pjoin(self.dir_path,'Cards','param_card.dat'), 455 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 456 ln(pjoin(self.dir_path,'Cards','ident_card.dat'), 457 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 458 459 # And remove check_sa in the SubProcess folder since now there is a 460 # check_sa tailored to each subprocess. 461 if os.path.isfile(pjoin(self.dir_path,'SubProcesses','check_sa.f')): 462 os.remove(pjoin(self.dir_path,'SubProcesses','check_sa.f')) 463 464 cwd = os.getcwd() 465 dirpath = os.path.join(self.dir_path, 'SubProcesses') 466 try: 467 os.chdir(dirpath) 468 except os.error: 469 logger.error('Could not cd to directory %s' % dirpath) 470 return 0 471 472 # Write the cts_mpc.h and cts_mprec.h files imported from CutTools 473 self.write_mp_files(writers.FortranWriter('cts_mprec.h'),\ 474 writers.FortranWriter('cts_mpc.h')) 475 476 # Return to original PWD 477 os.chdir(cwd) 478 479 # We must link the CutTools to the Library folder of the active Template 480 super(LoopProcessExporterFortranSA, self).link_CutTools(self.dir_path)
 481   
 482      # This function is placed here and not in the optimized exporter, 
 483      # because the same makefile.inc should be used in all cases. 
484 - def write_loop_makefile_definitions(self, writer, link_tir_libs, 485 tir_libs,tir_include=[]):
486 """ Create the file makefile which links to the TIR libraries.""" 487 488 file = open(os.path.join(self.loop_dir,'StandAlone', 489 'SubProcesses','MadLoop_makefile_definitions.inc')).read() 490 replace_dict={} 491 replace_dict['link_tir_libs']=' '.join(link_tir_libs) 492 replace_dict['tir_libs']=' '.join(tir_libs) 493 replace_dict['dotf']='%.f' 494 replace_dict['prefix']= self.SubProc_prefix 495 replace_dict['doto']='%.o' 496 replace_dict['tir_include']=' '.join(tir_include) 497 file=file%replace_dict 498 if writer: 499 writer.writelines(file) 500 else: 501 return file
502
503 - def convert_model(self, model, wanted_lorentz = [], 504 wanted_couplings = []):
505 """ Caches the aloha model created here when writing out the aloha 506 fortran subroutine. 507 """ 508 self.get_aloha_model(model) 509 super(LoopProcessExporterFortranSA, self).convert_model(model, 510 wanted_lorentz = wanted_lorentz, wanted_couplings = wanted_couplings)
511
512 - def get_ME_identifier(self, matrix_element, 513 group_number = None, group_elem_number = None):
514 """ A function returning a string uniquely identifying the matrix 515 element given in argument so that it can be used as a prefix to all 516 MadLoop5 subroutines and common blocks related to it. This allows 517 to compile several processes into one library as requested by the 518 BLHA (Binoth LesHouches Accord) guidelines. 519 The arguments group_number and proc_id are just for the LoopInduced 520 output with MadEvent.""" 521 522 # When disabling the loop grouping in the LoopInduced MadEvent output, 523 # we have only the group_number set and the proc_id set to None. In this 524 # case we don't print the proc_id. 525 if (not group_number is None) and group_elem_number is None: 526 return 'ML5_%d_%s_'%(matrix_element.get('processes')[0].get('id'), 527 group_number) 528 elif group_number is None or group_elem_number is None: 529 return 'ML5_%d_'%matrix_element.get('processes')[0].get('id') 530 else: 531 return 'ML5_%d_%s_%s_'%(matrix_element.get('processes')[0].get('id'), 532 group_number, group_elem_number)
533
534 - def get_SubProc_folder_name(self, process, 535 group_number = None, group_elem_number = None):
536 """Returns the name of the SubProcess directory, which can contain 537 the process goup and group element number for the case of loop-induced 538 integration with MadEvent.""" 539 540 # When disabling the loop grouping in the LoopInduced MadEvent output, 541 # we have only the group_number set and the proc_id set to None. In this 542 # case we don't print the proc_id. 543 if not group_number is None and group_elem_number is None: 544 return "%s%d_%s_%s"%(self.SubProc_prefix, process.get('id'), 545 group_number,process.shell_string(print_id=False)) 546 elif group_number is None or group_elem_number is None: 547 return "%s%s" %(self.SubProc_prefix,process.shell_string()) 548 else: 549 return "%s%d_%s_%s_%s"%(self.SubProc_prefix, process.get('id'), 550 group_number, group_elem_number,process.shell_string(print_id=False))
551 552 #=========================================================================== 553 # Set the compiler to be gfortran for the loop processes. 554 #===========================================================================
555 - def compiler_choice(self, compiler=export_v4.default_compiler):
556 """ Different daughter classes might want different compilers. 557 Here, the gfortran compiler is used throughout the compilation 558 (mandatory for CutTools written in f90) """ 559 if isinstance(compiler, str): 560 fortran_compiler = compiler 561 compiler = export_v4.default_compiler 562 compiler['fortran'] = fortran_compiler 563 564 if not compiler['fortran'] is None and not \ 565 any([name in compiler['fortran'] for name in \ 566 ['gfortran','ifort']]): 567 logger.info('For loop processes, the compiler must be fortran90'+\ 568 'compatible, like gfortran.') 569 compiler['fortran'] = 'gfortran' 570 self.set_compiler(compiler,True) 571 else: 572 self.set_compiler(compiler) 573 574 self.set_cpp_compiler(compiler['cpp'])
575
576 - def turn_to_mp_calls(self, helas_calls_list):
 577          # Prepend 'MP_' to all the helas calls in helas_calls_list. 
 578          # This might look like a brutal, unsafe implementation, but it is not: 
 579          # these calls are built from the properties of the HELAS objects, and 
 580          # whether they are evaluated in double or quadruple precision is 
 581          # irrelevant to them; it only matters to the output algorithm. 
 582          # Also, the cast to complex masses DCMPLX(*) must be replaced by 
 583          # CMPLX(*,KIND=16). 
 584          MP=re.compile(r"(?P<toSub>^.*CALL\s+)",re.IGNORECASE | re.MULTILINE) 
 585   
 586          def replaceWith(match_obj): 
 587              return match_obj.group('toSub')+'MP_' 
 588   
 589          DCMPLX=re.compile(r"DCMPLX\((?P<toSub>([^\)]*))\)",\ 
 590                            re.IGNORECASE | re.MULTILINE) 
 591   
 592          for i, helas_call in enumerate(helas_calls_list): 
 593              new_helas_call=MP.sub(replaceWith,helas_call) 
 594              helas_calls_list[i]=DCMPLX.sub(r"CMPLX(\g<toSub>,KIND=16)",\ 
 595                                             new_helas_call)
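          # Illustration (not part of the module; the HELAS call string below is
          # hypothetical): the two substitutions act in place on the list, e.g.
          #     calls = ["CALL VXXXXX(P(0,1),DCMPLX(MDL_MT),NHEL(1),-1*IC(1),W(1,1))"]
          #     exporter.turn_to_mp_calls(calls)
          #     calls[0]
          #     # -> "CALL MP_VXXXXX(P(0,1),CMPLX(MDL_MT,KIND=16),NHEL(1),-1*IC(1),W(1,1))"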
596 600 608
609 - def make(self):
610 """ Compiles the additional dependences for loop (such as CutTools).""" 611 super(LoopProcessExporterFortranSA, self).make() 612 613 # make CutTools (only necessary with MG option output_dependencies='internal') 614 libdir = os.path.join(self.dir_path,'lib') 615 sourcedir = os.path.join(self.dir_path,'Source') 616 if self.dependencies=='internal': 617 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 618 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 619 if os.path.exists(pjoin(sourcedir,'CutTools')): 620 logger.info('Compiling CutTools (can take a couple of minutes) ...') 621 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 622 logger.info(' ...done.') 623 else: 624 raise MadGraph5Error('Could not compile CutTools because its'+\ 625 ' source directory could not be found in the SOURCE folder.') 626 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 627 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 628 raise MadGraph5Error('CutTools compilation failed.') 629 630 # Verify compatibility between current compiler and the one which was 631 # used when last compiling CutTools (if specified). 632 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 633 libdir, 'libcts.a')))),'compiler_version.log') 634 if os.path.exists(compiler_log_path): 635 compiler_version_used = open(compiler_log_path,'r').read() 636 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 637 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 638 if os.path.exists(pjoin(sourcedir,'CutTools')): 639 logger.info('CutTools was compiled with a different fortran'+\ 640 ' compiler. Re-compiling it now...') 641 misc.compile(['cleanCT'], cwd = sourcedir) 642 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 643 logger.info(' ...done.') 644 else: 645 raise MadGraph5Error("CutTools installation in %s"\ 646 %os.path.realpath(pjoin(libdir, 'libcts.a'))+\ 647 " seems to have been compiled with a different compiler than"+\ 648 " the one specified in MG5_aMC. Please recompile CutTools.")
649
650 - def cat_coeff(self, ff_number, frac, is_imaginary, Nc_power, Nc_value=3):
651 """Concatenate the coefficient information to reduce it to 652 (fraction, is_imaginary) """ 653 654 total_coeff = ff_number * frac * fractions.Fraction(Nc_value) ** Nc_power 655 656 return (total_coeff, is_imaginary)
657
658 - def get_amp_to_jamp_map(self, col_amps, n_amps):
659 """ Returns a list with element 'i' being a list of tuples corresponding 660 to all apparition of amplitude number 'i' in the jamp number 'j' 661 with coeff 'coeff_j'. The format of each tuple describing an apparition 662 is (j, coeff_j). where coeff_j is of the form (Fraction, is_imag).""" 663 664 if(isinstance(col_amps,list)): 665 if(col_amps and isinstance(col_amps[0],list)): 666 color_amplitudes=col_amps 667 else: 668 raise MadGraph5Error, "Incorrect col_amps argument passed to get_amp_to_jamp_map" 669 else: 670 raise MadGraph5Error, "Incorrect col_amps argument passed to get_amp_to_jamp_map" 671 672 # To store the result 673 res_list = [[] for i in range(n_amps)] 674 for i, coeff_list in enumerate(color_amplitudes): 675 for (coefficient, amp_number) in coeff_list: 676 res_list[amp_number-1].append((i,self.cat_coeff(\ 677 coefficient[0],coefficient[1],coefficient[2],coefficient[3]))) 678 679 return res_list
680
681 - def get_color_matrix(self, matrix_element):
682 """Return the color matrix definition lines. This color matrix is of size 683 NLOOPAMPSxNBORNAMPS and allows for squaring individually each Loop and Born 684 amplitude.""" 685 686 logger.info('Computing diagram color coefficients') 687 688 # The two lists have a list of tuples at element 'i' which correspond 689 # to all apparitions of loop amplitude number 'i' in the jampl number 'j' 690 # with coeff 'coeffj'. The format of each tuple describing an apparition 691 # is (j, coeffj). 692 ampl_to_jampl=self.get_amp_to_jamp_map(\ 693 matrix_element.get_loop_color_amplitudes(), 694 matrix_element.get_number_of_loop_amplitudes()) 695 if matrix_element.get('processes')[0].get('has_born'): 696 ampb_to_jampb=self.get_amp_to_jamp_map(\ 697 matrix_element.get_born_color_amplitudes(), 698 matrix_element.get_number_of_born_amplitudes()) 699 else: 700 ampb_to_jampb=ampl_to_jampl 701 # Below is the original color matrix multiplying the JAMPS 702 if matrix_element.get('color_matrix'): 703 ColorMatrixDenom = \ 704 matrix_element.get('color_matrix').get_line_denominators() 705 ColorMatrixNum = [ matrix_element.get('color_matrix').\ 706 get_line_numerators(index, denominator) for 707 (index, denominator) in enumerate(ColorMatrixDenom) ] 708 else: 709 ColorMatrixDenom= [1] 710 ColorMatrixNum = [[1]] 711 712 # Below is the final color matrix output 713 ColorMatrixNumOutput=[] 714 ColorMatrixDenomOutput=[] 715 716 # Now we construct the color factors between each born and loop amplitude 717 # by scanning their contributions to the different jamps. 718 start = time.time() 719 progress_bar = None 720 time_info = False 721 for i, jampl_list in enumerate(ampl_to_jampl): 722 # This can be pretty long for processes with many color flows. 723 # So, if necessary (i.e. for more than 15s), we tell the user the 724 # estimated time for the processing. 725 if i==5: 726 elapsed_time = time.time()-start 727 t = len(ampl_to_jampl)*(elapsed_time/5.0) 728 if t > 10.0: 729 time_info = True 730 logger.info('The color factors computation will take '+\ 731 ' about %s to run. '%str(datetime.timedelta(seconds=int(t)))+\ 732 'Started on %s.'%datetime.datetime.now().strftime(\ 733 "%d-%m-%Y %H:%M")) 734 if logger.getEffectiveLevel()<logging.WARNING: 735 widgets = ['Color computation:', pbar.Percentage(), ' ', 736 pbar.Bar(),' ', pbar.ETA(), ' '] 737 progress_bar = pbar.ProgressBar(widgets=widgets, 738 maxval=len(ampl_to_jampl), fd=sys.stdout) 739 740 if not progress_bar is None: 741 progress_bar.update(i+1) 742 # Flush to force the printout of the progress_bar to be updated 743 sys.stdout.flush() 744 745 line_num=[] 746 line_denom=[] 747 748 # Treat the special case where this specific amplitude contributes to no 749 # color flow at all. So it is zero because of color but not even due to 750 # an accidental cancellation among color flows, but simply because of its 751 # projection to each individual color flow is zero. In such case, the 752 # corresponding jampl_list is empty and all color coefficients must then 753 # be zero. This happens for example in the Higgs Effective Theory model 754 # for the bubble made of a 4-gluon vertex and the effective ggH vertex. 
755 if len(jampl_list)==0: 756 line_num=[0]*len(ampb_to_jampb) 757 line_denom=[1]*len(ampb_to_jampb) 758 ColorMatrixNumOutput.append(line_num) 759 ColorMatrixDenomOutput.append(line_denom) 760 continue 761 762 for jampb_list in ampb_to_jampb: 763 real_num=0 764 imag_num=0 765 common_denom=color_amp.ColorMatrix.lcmm(*[abs(ColorMatrixDenom[jampl]* 766 ampl_coeff[0].denominator*ampb_coeff[0].denominator) for 767 ((jampl, ampl_coeff),(jampb,ampb_coeff)) in 768 itertools.product(jampl_list,jampb_list)]) 769 for ((jampl, ampl_coeff),(jampb, ampb_coeff)) in \ 770 itertools.product(jampl_list,jampb_list): 771 # take the numerator and multiply by lcm/denominator 772 # as we will later divide by the lcm. 773 buff_num=ampl_coeff[0].numerator*\ 774 ampb_coeff[0].numerator*ColorMatrixNum[jampl][jampb]*\ 775 abs(common_denom)/(ampl_coeff[0].denominator*\ 776 ampb_coeff[0].denominator*ColorMatrixDenom[jampl]) 777 # Remember that we must take the complex conjugate of 778 # the born jamp color coefficient because we will compute 779 # the square with 2 Re(LoopAmp x BornAmp*) 780 if ampl_coeff[1] and ampb_coeff[1]: 781 real_num=real_num+buff_num 782 elif not ampl_coeff[1] and not ampb_coeff[1]: 783 real_num=real_num+buff_num 784 elif not ampl_coeff[1] and ampb_coeff[1]: 785 imag_num=imag_num-buff_num 786 else: 787 imag_num=imag_num+buff_num 788 assert not (real_num!=0 and imag_num!=0), "MadGraph5_aMC@NLO found a "+\ 789 "color matrix element which has both a real and imaginary part." 790 if imag_num!=0: 791 res=fractions.Fraction(imag_num,common_denom) 792 line_num.append(res.numerator) 793 # Negative denominator means imaginary color coef of the 794 # final color matrix 795 line_denom.append(res.denominator*-1) 796 else: 797 res=fractions.Fraction(real_num,common_denom) 798 line_num.append(res.numerator) 799 # Positive denominator means real color coef of the final color matrix 800 line_denom.append(res.denominator) 801 802 ColorMatrixNumOutput.append(line_num) 803 ColorMatrixDenomOutput.append(line_denom) 804 805 if time_info: 806 logger.info('Finished on %s.'%datetime.datetime.now().strftime(\ 807 "%d-%m-%Y %H:%M")) 808 if progress_bar!=None: 809 progress_bar.finish() 810 811 return (ColorMatrixNumOutput,ColorMatrixDenomOutput)
812
813 - def get_context(self,matrix_element):
814 """ Returns the contextual variables which need to be set when 815 pre-processing the template files.""" 816 817 # The nSquaredSO entry of the general replace dictionary should have 818 # been set in write_loopmatrix prior to the first call to this function 819 # However, for cases where the TIRCaching contextual variable is 820 # irrelevant (like in the default output), this might not be the case 821 # so we set it to 1. 822 try: 823 n_squared_split_orders = matrix_element.rep_dict['nSquaredSO'] 824 except (KeyError, AttributeError): 825 n_squared_split_orders = 1 826 827 LoopInduced = not matrix_element.get('processes')[0].get('has_born') 828 self.has_loop_induced = max(LoopInduced, self.has_loop_induced) 829 # Force the computation of loop color flows for loop_induced processes 830 ComputeColorFlows = self.compute_color_flows or LoopInduced 831 # The variable AmplitudeReduction is just to make the contextual 832 # conditions more readable in the include files. 833 AmplitudeReduction = LoopInduced or ComputeColorFlows 834 # Even when not reducing at the amplitude level, the TIR caching 835 # is useful when there is more than one squared split order config. 836 TIRCaching = AmplitudeReduction or n_squared_split_orders>1 837 MadEventOutput = False 838 return {'LoopInduced': LoopInduced, 839 'ComputeColorFlows': ComputeColorFlows, 840 'AmplitudeReduction': AmplitudeReduction, 841 'TIRCaching': TIRCaching, 842 'MadEventOutput': MadEventOutput}
843 844 845 #=========================================================================== 846 # generate_subprocess_directory 847 #===========================================================================
848 - def generate_loop_subprocess(self, matrix_element, fortran_model, 849 group_number = None, proc_id = None, config_map=None, unique_id=None):
850 """Generate the Pxxxxx directory for a loop subprocess in MG4 standalone, 851 including the necessary loop_matrix.f, born_matrix.f and include files. 852 Notice that this is too different from generate_subprocess_directory 853 so that there is no point reusing this mother function. 854 The 'group_number' and 'proc_id' options are only used for the LoopInduced 855 MadEvent output and only to specify the ME_identifier and the P* 856 SubProcess directory name.""" 857 858 cwd = os.getcwd() 859 proc_dir_name = self.get_SubProc_folder_name( 860 matrix_element.get('processes')[0],group_number,proc_id) 861 dirpath = os.path.join(self.dir_path, 'SubProcesses', proc_dir_name) 862 863 try: 864 os.mkdir(dirpath) 865 except os.error as error: 866 logger.warning(error.strerror + " " + dirpath) 867 868 try: 869 os.chdir(dirpath) 870 except os.error: 871 logger.error('Could not cd to directory %s' % dirpath) 872 return 0 873 874 logger.info('Creating files in directory %s' % dirpath) 875 876 if unique_id is None: 877 raise MadGraph5Error, 'A unique id must be provided to the function'+\ 878 'generate_loop_subprocess of LoopProcessExporterFortranSA.' 879 # Create an include with the unique consecutive ID assigned 880 open('unique_id.inc','w').write( 881 """ integer UNIQUE_ID 882 parameter(UNIQUE_ID=%d)"""%unique_id) 883 884 # Extract number of external particles 885 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 886 887 calls=self.write_loop_matrix_element_v4(None,matrix_element, 888 fortran_model, group_number = group_number, 889 proc_id = proc_id, config_map = config_map) 890 891 # We assume here that all processes must share the same property of 892 # having a born or not, which must be true anyway since these are two 893 # definite different classes of processes which can never be treated on 894 # the same footing. 895 if matrix_element.get('processes')[0].get('has_born'): 896 filename = 'born_matrix.f' 897 calls = self.write_bornmatrix( 898 writers.FortranWriter(filename), 899 matrix_element, 900 fortran_model) 901 902 filename = 'pmass.inc' 903 self.write_pmass_file(writers.FortranWriter(filename), 904 matrix_element) 905 906 filename = 'ngraphs.inc' 907 self.write_ngraphs_file(writers.FortranWriter(filename), 908 len(matrix_element.get_all_amplitudes())) 909 910 # Do not draw the loop diagrams if they are too many. 
911 # The user can always decide to do it manually, if really needed 912 loop_diags = [loop_diag for loop_diag in\ 913 matrix_element.get('base_amplitude').get('loop_diagrams')\ 914 if isinstance(loop_diag,LoopDiagram) and loop_diag.get('type') > 0] 915 if len(loop_diags)>5000: 916 logger.info("There are more than 5000 loop diagrams."+\ 917 "Only the first 5000 are drawn.") 918 filename = "loop_matrix.ps" 919 plot = draw.MultiEpsDiagramDrawer(base_objects.DiagramList( 920 loop_diags[:5000]),filename, 921 model=matrix_element.get('processes')[0].get('model'),amplitude='') 922 logger.info("Drawing loop Feynman diagrams for " + \ 923 matrix_element.get('processes')[0].nice_string()) 924 plot.draw() 925 926 if matrix_element.get('processes')[0].get('has_born'): 927 filename = "born_matrix.ps" 928 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\ 929 get('born_diagrams'), 930 filename, 931 model=matrix_element.get('processes')[0].\ 932 get('model'), 933 amplitude='') 934 logger.info("Generating born Feynman diagrams for " + \ 935 matrix_element.get('processes')[0].nice_string(\ 936 print_weighted=False)) 937 plot.draw() 938 939 self.link_files_from_Subprocesses(self.get_SubProc_folder_name( 940 matrix_element.get('processes')[0],group_number,proc_id)) 941 942 # Return to original PWD 943 os.chdir(cwd) 944 945 if not calls: 946 calls = 0 947 return calls
948 969
970 - def generate_general_replace_dict(self,matrix_element, 971 group_number = None, proc_id = None):
972 """Generates the entries for the general replacement dictionary used 973 for the different output codes for this exporter.The arguments 974 group_number and proc_id are just for the LoopInduced output with MadEvent.""" 975 976 dict={} 977 # A general process prefix which appears in front of all MadLooop 978 # subroutines and common block so that several processes can be compiled 979 # together into one library, as necessary to follow BLHA guidelines. 980 981 dict['proc_prefix'] = self.get_ME_identifier(matrix_element, 982 group_number = group_number, group_elem_number = proc_id) 983 984 if 'prefix' in self.cmd_options and self.cmd_options['prefix'] in ['int','proc']: 985 for proc in matrix_element.get('processes'): 986 ids = [l.get('id') for l in proc.get('legs_with_decays')] 987 self.prefix_info[tuple(ids)] = [dict['proc_prefix'], proc.get_tag()] 988 989 # The proc_id is used for MadEvent grouping, so none of our concern here 990 # and it is simply set to an empty string. 991 dict['proc_id'] = '' 992 # Extract version number and date from VERSION file 993 info_lines = self.get_mg5_info_lines() 994 dict['info_lines'] = info_lines 995 # Extract process info lines 996 process_lines = self.get_process_info_lines(matrix_element) 997 dict['process_lines'] = process_lines 998 # Extract number of external particles 999 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 1000 dict['nexternal'] = nexternal 1001 dict['nincoming'] = ninitial 1002 # Extract ncomb 1003 ncomb = matrix_element.get_helicity_combinations() 1004 dict['ncomb'] = ncomb 1005 # Extract nloopamps 1006 nloopamps = matrix_element.get_number_of_loop_amplitudes() 1007 dict['nloopamps'] = nloopamps 1008 # Extract nloopdiags 1009 nloopdiags = len(matrix_element.get('diagrams')) 1010 dict['nloopdiags'] = nloopdiags 1011 # Extract nctamps 1012 nctamps = matrix_element.get_number_of_CT_amplitudes() 1013 dict['nctamps'] = nctamps 1014 # Extract nwavefuncs 1015 nwavefuncs = matrix_element.get_number_of_external_wavefunctions() 1016 dict['nwavefuncs'] = nwavefuncs 1017 # Set format of the double precision 1018 dict['real_dp_format']='real*8' 1019 dict['real_mp_format']='real*16' 1020 # Set format of the complex 1021 dict['complex_dp_format']='complex*16' 1022 dict['complex_mp_format']='complex*32' 1023 # Set format of the masses 1024 dict['mass_dp_format'] = dict['complex_dp_format'] 1025 dict['mass_mp_format'] = dict['complex_mp_format'] 1026 # Fill in default values for the placeholders for the madevent 1027 # loop-induced output 1028 dict['nmultichannels'] = 0 1029 dict['nmultichannel_configs'] = 0 1030 dict['config_map_definition'] = '' 1031 dict['config_index_map_definition'] = '' 1032 # Color matrix size 1033 # For loop induced processes it is NLOOPAMPSxNLOOPAMPS and otherwise 1034 # it is NLOOPAMPSxNBORNAMPS 1035 # Also, how to access the number of Born squared order contributions 1036 1037 if matrix_element.get('processes')[0].get('has_born'): 1038 dict['color_matrix_size'] = 'nbornamps' 1039 dict['get_nsqso_born']=\ 1040 "include 'nsqso_born.inc'" 1041 else: 1042 dict['get_nsqso_born']="""INTEGER NSQSO_BORN 1043 PARAMETER (NSQSO_BORN=0) 1044 """ 1045 dict['color_matrix_size'] = 'nloopamps' 1046 1047 # These placeholders help to have as many common templates for the 1048 # output of the loop induced processes and those with a born 1049 # contribution. 
1050 if matrix_element.get('processes')[0].get('has_born'): 1051 # Extract nbornamps 1052 nbornamps = matrix_element.get_number_of_born_amplitudes() 1053 dict['nbornamps'] = nbornamps 1054 dict['ncomb_helas_objs'] = ',ncomb' 1055 dict['nbornamps_decl'] = \ 1056 """INTEGER NBORNAMPS 1057 PARAMETER (NBORNAMPS=%d)"""%nbornamps 1058 dict['nBornAmps'] = nbornamps 1059 1060 else: 1061 dict['ncomb_helas_objs'] = '' 1062 dict['dp_born_amps_decl'] = '' 1063 dict['dp_born_amps_decl_in_mp'] = '' 1064 dict['copy_mp_to_dp_born_amps'] = '' 1065 dict['mp_born_amps_decl'] = '' 1066 dict['nbornamps_decl'] = '' 1067 dict['nbornamps'] = 0 1068 dict['nBornAmps'] = 0 1069 1070 return dict
1071
1072 - def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model, 1073 group_number = None, proc_id = None, config_map = None):
1074 """ Writes loop_matrix.f, CT_interface.f, loop_num.f and 1075 mp_born_amps_and_wfs. 1076 The arguments group_number and proc_id are just for the LoopInduced 1077 output with MadEvent and only used in get_ME_identifier. 1078 """ 1079 1080 # Create the necessary files for the loop matrix element subroutine 1081 1082 if config_map: 1083 raise MadGraph5Error, 'The default loop output cannot be used with'+\ 1084 'MadEvent and cannot compute the AMP2 for multi-channeling.' 1085 1086 if not isinstance(fortran_model,\ 1087 helas_call_writers.FortranUFOHelasCallWriter): 1088 raise MadGraph5Error, 'The loop fortran output can only'+\ 1089 ' work with a UFO Fortran model' 1090 1091 LoopFortranModel = helas_call_writers.FortranUFOHelasCallWriter( 1092 argument=fortran_model.get('model'), 1093 hel_sum=matrix_element.get('processes')[0].get('has_born')) 1094 1095 # Compute the analytical information of the loop wavefunctions in the 1096 # loop helas matrix elements using the cached aloha model to reuse 1097 # as much as possible the aloha computations already performed for 1098 # writing out the aloha fortran subroutines. 1099 matrix_element.compute_all_analytic_information( 1100 self.get_aloha_model(matrix_element.get('processes')[0].get('model'))) 1101 1102 # Initialize a general replacement dictionary with entries common to 1103 # many files generated here. 1104 matrix_element.rep_dict = self.generate_general_replace_dict( 1105 matrix_element, group_number = group_number, proc_id = proc_id) 1106 1107 # Extract max number of loop couplings (specific to this output type) 1108 matrix_element.rep_dict['maxlcouplings']= \ 1109 matrix_element.find_max_loop_coupling() 1110 # The born amp declaration suited for also outputing the loop-induced 1111 # processes as well. 1112 if matrix_element.get('processes')[0].get('has_born'): 1113 matrix_element.rep_dict['dp_born_amps_decl_in_mp'] = \ 1114 matrix_element.rep_dict['complex_dp_format']+" DPAMP(NBORNAMPS,NCOMB)"+\ 1115 "\n common/%sAMPS/DPAMP"%matrix_element.rep_dict['proc_prefix'] 1116 matrix_element.rep_dict['dp_born_amps_decl'] = \ 1117 matrix_element.rep_dict['complex_dp_format']+" AMP(NBORNAMPS,NCOMB)"+\ 1118 "\n common/%sAMPS/AMP"%matrix_element.rep_dict['proc_prefix'] 1119 matrix_element.rep_dict['mp_born_amps_decl'] = \ 1120 matrix_element.rep_dict['complex_mp_format']+" AMP(NBORNAMPS,NCOMB)"+\ 1121 "\n common/%sMP_AMPS/AMP"%matrix_element.rep_dict['proc_prefix'] 1122 matrix_element.rep_dict['copy_mp_to_dp_born_amps'] = \ 1123 '\n'.join(['DO I=1,NBORNAMPS','DPAMP(I,H)=AMP(I,H)','ENDDO']) 1124 1125 if writer: 1126 raise MadGraph5Error, 'Matrix output mode no longer supported.' 
1127 1128 filename = 'loop_matrix.f' 1129 calls = self.write_loopmatrix(writers.FortranWriter(filename), 1130 matrix_element, 1131 LoopFortranModel) 1132 1133 # Write out the proc_prefix in a file, this is quite handy 1134 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w') 1135 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix']) 1136 proc_prefix_writer.close() 1137 1138 filename = 'check_sa.f' 1139 self.write_check_sa(writers.FortranWriter(filename),matrix_element) 1140 1141 filename = 'CT_interface.f' 1142 self.write_CT_interface(writers.FortranWriter(filename),\ 1143 matrix_element) 1144 1145 1146 1147 filename = 'improve_ps.f' 1148 calls = self.write_improve_ps(writers.FortranWriter(filename), 1149 matrix_element) 1150 1151 filename = 'loop_num.f' 1152 self.write_loop_num(writers.FortranWriter(filename),\ 1153 matrix_element,LoopFortranModel) 1154 1155 filename = 'mp_born_amps_and_wfs.f' 1156 self.write_born_amps_and_wfs(writers.FortranWriter(filename),\ 1157 matrix_element,LoopFortranModel) 1158 1159 # Extract number of external particles 1160 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 1161 filename = 'nexternal.inc' 1162 self.write_nexternal_file(writers.FortranWriter(filename), 1163 nexternal, ninitial) 1164 1165 filename = 'process_info.inc' 1166 self.write_process_info_file(writers.FortranWriter(filename), 1167 matrix_element) 1168 return calls
1169
1170 - def write_process_info_file(self, writer, matrix_element):
1171 """A small structural function to write the include file specifying some 1172 process characteristics.""" 1173 1174 model = matrix_element.get('processes')[0].get('model') 1175 process_info = {} 1176 # The maximum spin of any particle connected (or directly running in) 1177 # any loop of this matrix element. This is important because there is 1178 # some limitation in the stability tests that can be performed when this 1179 # maximum spin is above 3 (vectors). Also CutTools has limitations in 1180 # this regard. 1181 process_info['max_spin_connected_to_loop']=\ 1182 matrix_element.get_max_spin_connected_to_loop() 1183 1184 process_info['max_spin_external_particle']= max( 1185 model.get_particle(l.get('id')).get('spin') for l in 1186 matrix_element.get('processes')[0].get('legs')) 1187 1188 proc_include = \ 1189 """ 1190 INTEGER MAX_SPIN_CONNECTED_TO_LOOP 1191 PARAMETER(MAX_SPIN_CONNECTED_TO_LOOP=%(max_spin_connected_to_loop)d) 1192 INTEGER MAX_SPIN_EXTERNAL_PARTICLE 1193 PARAMETER(MAX_SPIN_EXTERNAL_PARTICLE=%(max_spin_external_particle)d) 1194 """%process_info 1195 1196 writer.writelines(proc_include)
1197
1198 - def generate_subprocess_directory(self, matrix_element, fortran_model):
1199 """ To overload the default name for this function such that the correct 1200 function is used when called from the command interface """ 1201 1202 self.unique_id +=1 1203 return self.generate_loop_subprocess(matrix_element,fortran_model, 1204 unique_id=self.unique_id)
1205
1206 - def write_check_sa(self, writer, matrix_element):
1207 """Writes out the steering code check_sa. In the optimized output mode, 1208 All the necessary entries in the replace_dictionary have already been 1209 set in write_loopmatrix because it is only there that one has access to 1210 the information about split orders.""" 1211 replace_dict = copy.copy(matrix_element.rep_dict) 1212 for key in ['print_so_born_results','print_so_loop_results', 1213 'write_so_born_results','write_so_loop_results','set_coupling_target']: 1214 if key not in replace_dict.keys(): 1215 replace_dict[key]='' 1216 1217 if matrix_element.get('processes')[0].get('has_born'): 1218 file = open(os.path.join(self.template_dir,'check_sa.inc')).read() 1219 else: 1220 file = open(os.path.join(self.template_dir,\ 1221 'check_sa_loop_induced.inc')).read() 1222 file=file%replace_dict 1223 writer.writelines(file) 1224 1225 # We can always write the f2py wrapper if present (in loop optimized mode, it is) 1226 if not os.path.isfile(pjoin(self.template_dir,'check_py.f.inc')): 1227 return 1228 1229 file = open(os.path.join(self.template_dir,\ 1230 'check_py.f.inc')).read() 1231 1232 if 'prefix' in self.cmd_options and self.cmd_options['prefix'] in ['int','proc']: 1233 replace_dict['prefix_routine'] = replace_dict['proc_prefix'] 1234 else: 1235 replace_dict['prefix_routine'] = '' 1236 file=file%replace_dict 1237 new_path = writer.name.replace('check_sa.f', 'f2py_wrapper.f') 1238 new_writer = writer.__class__(new_path, 'w') 1239 new_writer.writelines(file) 1240 1241 file = open(os.path.join(self.template_dir,\ 1242 'check_sa.py.inc')).read() 1243 # For now just put in an empty PS point but in the future, maybe generate 1244 # a valid one already here by default 1245 curr_proc = matrix_element.get('processes')[0] 1246 random_PSpoint_python_formatted = \ 1247 """# Specify your chosen PS point below. If you leave it filled with None, then the script will attempt to read it from the file PS.input. 1248 p= [[None,]*4]*%d"""%len(curr_proc.get('legs')) 1249 1250 process_definition_string = curr_proc.nice_string().replace('Process:','') 1251 file=file.format(random_PSpoint_python_formatted,process_definition_string, 1252 replace_dict['proc_prefix'].lower()) 1253 new_path = writer.name.replace('check_sa.f', 'check_sa.py') 1254 new_writer = open(new_path, 'w') 1255 new_writer.writelines(file) 1256 # Make it executable 1257 os.chmod(new_path, os.stat(new_path).st_mode | stat.S_IEXEC)
1258
1259 - def write_improve_ps(self, writer, matrix_element):
1260 """ Write out the improve_ps subroutines which modify the PS point 1261 given in input and slightly deform it to achieve exact onshellness on 1262 all external particles as well as perfect energy-momentum conservation""" 1263 replace_dict = copy.copy(matrix_element.rep_dict) 1264 1265 (nexternal,ninitial)=matrix_element.get_nexternal_ninitial() 1266 replace_dict['ninitial']=ninitial 1267 mass_list=matrix_element.get_external_masses()[:-2] 1268 mp_variable_prefix = check_param_card.ParamCard.mp_prefix 1269 1270 # Write the quadruple precision version of this routine only. 1271 replace_dict['real_format']=replace_dict['real_mp_format'] 1272 replace_dict['mp_prefix']='MP_' 1273 replace_dict['exp_letter']='e' 1274 replace_dict['mp_specifier']='_16' 1275 replace_dict['coupl_inc_name']='mp_coupl.inc' 1276 replace_dict['masses_def']='\n'.join(['MASSES(%(i)d)=%(prefix)s%(m)s'\ 1277 %{'i':i+1,'m':m, 'prefix':mp_variable_prefix} for \ 1278 i, m in enumerate(mass_list)]) 1279 file_mp = open(os.path.join(self.template_dir,'improve_ps.inc')).read() 1280 file_mp=file_mp%replace_dict 1281 # 1282 writer.writelines(file_mp)
1283
1284 - def write_loop_num(self, writer, matrix_element,fortran_model):
1285 """ Create the file containing the core subroutine called by CutTools 1286 which contains the Helas calls building the loop""" 1287 1288 if not matrix_element.get('processes') or \ 1289 not matrix_element.get('diagrams'): 1290 return 0 1291 1292 # Set lowercase/uppercase Fortran code 1293 writers.FortranWriter.downcase = False 1294 1295 file = open(os.path.join(self.template_dir,'loop_num.inc')).read() 1296 1297 replace_dict = copy.copy(matrix_element.rep_dict) 1298 1299 loop_helas_calls=fortran_model.get_loop_amplitude_helas_calls(matrix_element) 1300 replace_dict['maxlcouplings']=matrix_element.find_max_loop_coupling() 1301 replace_dict['loop_helas_calls'] = "\n".join(loop_helas_calls) 1302 1303 # The squaring is only necessary for the processes with born where the 1304 # sum over helicities is done before sending the numerator to CT. 1305 dp_squaring_lines=['DO I=1,NBORNAMPS', 1306 'CFTOT=DCMPLX(CF_N(AMPLNUM,I)/DBLE(ABS(CF_D(AMPLNUM,I))),0.0d0)', 1307 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1', 1308 'RES=RES+CFTOT*BUFF*DCONJG(AMP(I,H))','ENDDO'] 1309 mp_squaring_lines=['DO I=1,NBORNAMPS', 1310 'CFTOT=CMPLX(CF_N(AMPLNUM,I)/(1.0E0_16*ABS(CF_D(AMPLNUM,I))),0.0E0_16,KIND=16)', 1311 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1', 1312 'QPRES=QPRES+CFTOT*BUFF*CONJG(AMP(I,H))','ENDDO'] 1313 if matrix_element.get('processes')[0].get('has_born'): 1314 replace_dict['dp_squaring']='\n'.join(dp_squaring_lines) 1315 replace_dict['mp_squaring']='\n'.join(mp_squaring_lines) 1316 else: 1317 replace_dict['dp_squaring']='RES=BUFF' 1318 replace_dict['mp_squaring']='QPRES=BUFF' 1319 1320 # Prepend MP_ to all helas calls. 1321 self.turn_to_mp_calls(loop_helas_calls) 1322 replace_dict['mp_loop_helas_calls'] = "\n".join(loop_helas_calls) 1323 1324 file=file%replace_dict 1325 1326 if writer: 1327 writer.writelines(file) 1328 else: 1329 return file
1330
1331 - def write_CT_interface(self, writer, matrix_element, optimized_output=False):
1332 """ Create the file CT_interface.f which contains the subroutine defining 1333 the loop HELAS-like calls along with the general interfacing subroutine. 1334 It is used to interface against any OPP tool, including Samurai and Ninja.""" 1335 1336 files=[] 1337 1338 # First write CT_interface which interfaces MG5 with CutTools. 1339 replace_dict=copy.copy(matrix_element.rep_dict) 1340 1341 # We finalize CT result differently wether we used the built-in 1342 # squaring against the born. 1343 if matrix_element.get('processes')[0].get('has_born'): 1344 replace_dict['finalize_CT']='\n'.join([\ 1345 'RES(%d)=NORMALIZATION*2.0d0*DBLE(RES(%d))'%(i,i) for i in range(1,4)]) 1346 else: 1347 replace_dict['finalize_CT']='\n'.join([\ 1348 'RES(%d)=NORMALIZATION*RES(%d)'%(i,i) for i in range(1,4)]) 1349 1350 file = open(os.path.join(self.template_dir,'CT_interface.inc')).read() 1351 1352 file = file % replace_dict 1353 files.append(file) 1354 1355 # Now collect the different kind of subroutines needed for the 1356 # loop HELAS-like calls. 1357 HelasLoopAmpsCallKeys=matrix_element.get_used_helas_loop_amps() 1358 1359 for callkey in HelasLoopAmpsCallKeys: 1360 replace_dict=copy.copy(matrix_element.rep_dict) 1361 # Add to this dictionary all other attribute common to all 1362 # HELAS-like loop subroutines. 1363 if matrix_element.get('processes')[0].get('has_born'): 1364 replace_dict['validh_or_nothing']=',validh' 1365 else: 1366 replace_dict['validh_or_nothing']='' 1367 # In the optimized output, the number of couplings in the loop is 1368 # not specified so we only treat it here if necessary: 1369 if len(callkey)>2: 1370 replace_dict['ncplsargs']=callkey[2] 1371 cplsargs="".join(["C%d,MP_C%d, "%(i,i) for i in range(1,callkey[2]+1)]) 1372 replace_dict['cplsargs']=cplsargs 1373 cplsdecl="".join(["C%d, "%i for i in range(1,callkey[2]+1)])[:-2] 1374 replace_dict['cplsdecl']=cplsdecl 1375 mp_cplsdecl="".join(["MP_C%d, "%i for i in range(1,callkey[2]+1)])[:-2] 1376 replace_dict['mp_cplsdecl']=mp_cplsdecl 1377 cplset="\n".join(["\n".join(["LC(%d)=C%d"%(i,i),\ 1378 "MP_LC(%d)=MP_C%d"%(i,i)])\ 1379 for i in range(1,callkey[2]+1)]) 1380 replace_dict['cplset']=cplset 1381 1382 replace_dict['nloopline']=callkey[0] 1383 wfsargs="".join(["W%d, "%i for i in range(1,callkey[1]+1)]) 1384 replace_dict['wfsargs']=wfsargs 1385 # We don't pass the multiple precision mass in the optimized_output 1386 if not optimized_output: 1387 margs="".join(["M%d,MP_M%d, "%(i,i) for i in range(1,callkey[0]+1)]) 1388 else: 1389 margs="".join(["M%d, "%i for i in range(1,callkey[0]+1)]) 1390 replace_dict['margs']=margs 1391 wfsargsdecl="".join([("W%d, "%i) for i in range(1,callkey[1]+1)])[:-2] 1392 replace_dict['wfsargsdecl']=wfsargsdecl 1393 margsdecl="".join(["M%d, "%i for i in range(1,callkey[0]+1)])[:-2] 1394 replace_dict['margsdecl']=margsdecl 1395 mp_margsdecl="".join(["MP_M%d, "%i for i in range(1,callkey[0]+1)])[:-2] 1396 replace_dict['mp_margsdecl']=mp_margsdecl 1397 weset="\n".join([("WE("+str(i)+")=W"+str(i)) for \ 1398 i in range(1,callkey[1]+1)]) 1399 replace_dict['weset']=weset 1400 weset="\n".join([("WE(%d)=W%d"%(i,i)) for i in range(1,callkey[1]+1)]) 1401 replace_dict['weset']=weset 1402 msetlines=["M2L(1)=M%d**2"%(callkey[0]),] 1403 mset="\n".join(msetlines+["M2L(%d)=M%d**2"%(i,i-1) for \ 1404 i in range(2,callkey[0]+1)]) 1405 replace_dict['mset']=mset 1406 mset2lines=["ML(1)=M%d"%(callkey[0]),"ML(2)=M%d"%(callkey[0]), 1407 "MP_ML(1)=MP_M%d"%(callkey[0]),"MP_ML(2)=MP_M%d"%(callkey[0])] 1408 
mset2="\n".join(mset2lines+["\n".join(["ML(%d)=M%d"%(i,i-2), 1409 "MP_ML(%d)=MP_M%d"%(i,i-2)]) for \ 1410 i in range(3,callkey[0]+3)]) 1411 replace_dict['mset2']=mset2 1412 replace_dict['nwfsargs'] = callkey[1] 1413 if callkey[0]==callkey[1]: 1414 replace_dict['nwfsargs_header'] = "" 1415 replace_dict['pairingargs']="" 1416 replace_dict['pairingdecl']="" 1417 pairingset="""DO I=1,NLOOPLINE 1418 PAIRING(I)=1 1419 ENDDO 1420 """ 1421 replace_dict['pairingset']=pairingset 1422 else: 1423 replace_dict['nwfsargs_header'] = '_%d'%callkey[1] 1424 pairingargs="".join([("P"+str(i)+", ") for i in \ 1425 range(1,callkey[0]+1)]) 1426 replace_dict['pairingargs']=pairingargs 1427 pairingdecl="integer "+"".join([("P"+str(i)+", ") for i in \ 1428 range(1,callkey[0]+1)])[:-2] 1429 replace_dict['pairingdecl']=pairingdecl 1430 pairingset="\n".join([("PAIRING("+str(i)+")=P"+str(i)) for \ 1431 i in range(1,callkey[0]+1)]) 1432 replace_dict['pairingset']=pairingset 1433 1434 file = open(os.path.join(self.template_dir,\ 1435 'helas_loop_amplitude.inc')).read() 1436 file = file % replace_dict 1437 files.append(file) 1438 1439 file="\n".join(files) 1440 1441 if writer: 1442 writer.writelines(file,context=self.get_context(matrix_element)) 1443 else: 1444 return file
1445 1446 # Helper function to split HELAS CALLS in dedicated subroutines placed 1447 # in different files.
1448 - def split_HELASCALLS(self, writer, replace_dict, template_name, masterfile, \ 1449 helas_calls, entry_name, bunch_name,n_helas=2000, 1450 required_so_broadcaster = 'LOOP_REQ_SO_DONE', 1451 continue_label = 1000, momenta_array_name='P', 1452 context={}):
1453 """ Finish the code generation with splitting. 1454 Split the helas calls in the argument helas_calls into bunches of 1455 size n_helas and place them in dedicated subroutine with name 1456 <bunch_name>_i. Also setup the corresponding calls to these subroutine 1457 in the replace_dict dictionary under the entry entry_name. 1458 The context specified will be forwarded to the the fileWriter.""" 1459 helascalls_replace_dict=copy.copy(replace_dict) 1460 helascalls_replace_dict['bunch_name']=bunch_name 1461 helascalls_files=[] 1462 for i, k in enumerate(range(0, len(helas_calls), n_helas)): 1463 helascalls_replace_dict['bunch_number']=i+1 1464 helascalls_replace_dict['helas_calls']=\ 1465 '\n'.join(helas_calls[k:k + n_helas]) 1466 helascalls_replace_dict['required_so_broadcaster']=\ 1467 required_so_broadcaster 1468 helascalls_replace_dict['continue_label']=continue_label 1469 new_helascalls_file = open(os.path.join(self.template_dir,\ 1470 template_name)).read() 1471 new_helascalls_file = new_helascalls_file % helascalls_replace_dict 1472 helascalls_files.append(new_helascalls_file) 1473 # Setup the call to these HELASCALLS subroutines in loop_matrix.f 1474 helascalls_calls = [ "CALL %s%s_%d(%s,NHEL,H,IC)"%\ 1475 (replace_dict['proc_prefix'] ,bunch_name,a+1,momenta_array_name) \ 1476 for a in range(len(helascalls_files))] 1477 replace_dict[entry_name]='\n'.join(helascalls_calls) 1478 if writer: 1479 for i, helascalls_file in enumerate(helascalls_files): 1480 filename = '%s_%d.f'%(bunch_name,i+1) 1481 writers.FortranWriter(filename).writelines(helascalls_file, 1482 context=context) 1483 else: 1484 masterfile='\n'.join([masterfile,]+helascalls_files) 1485 1486 return masterfile
1487
1488 - def write_loopmatrix(self, writer, matrix_element, fortran_model, 1489 noSplit=False):
1490 """Create the loop_matrix.f file.""" 1491 1492 if not matrix_element.get('processes') or \ 1493 not matrix_element.get('diagrams'): 1494 return 0 1495 1496 # Set lowercase/uppercase Fortran code 1497 1498 writers.FortranWriter.downcase = False 1499 1500 replace_dict = copy.copy(matrix_element.rep_dict) 1501 1502 # Extract overall denominator 1503 # Averaging initial state color, spin, and identical FS particles 1504 den_factor_line = self.get_den_factor_line(matrix_element) 1505 replace_dict['den_factor_line'] = den_factor_line 1506 # When the user asks for the polarized matrix element we must 1507 # multiply back by the helicity averaging factor 1508 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 1509 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\ 1510 matrix_element.get_beams_hel_avg_factor() 1511 1512 # These entries are specific for the output for loop-induced processes 1513 # Also sets here the details of the squaring of the loop ampltiudes 1514 # with the born or the loop ones. 1515 if not matrix_element.get('processes')[0].get('has_born'): 1516 replace_dict['compute_born']=\ 1517 """C There is of course no born for loop induced processes 1518 ANS(0)=0.0d0 1519 """ 1520 replace_dict['set_reference']='\n'.join([ 1521 'C For loop-induced, the reference for comparison is set later'+\ 1522 ' from the total contribution of the previous PS point considered.', 1523 'C But you can edit here the value to be used for the first PS point.', 1524 'if (NPSPOINTS.eq.0) then','ref=1.0d-50','else', 1525 'ref=nextRef/DBLE(NPSPOINTS)','endif']) 1526 replace_dict['loop_induced_setup'] = '\n'.join([ 1527 'HELPICKED_BU=HELPICKED','HELPICKED=H','MP_DONE=.FALSE.', 1528 'IF(SKIPLOOPEVAL) THEN','GOTO 1227','ENDIF']) 1529 replace_dict['loop_induced_finalize'] = \ 1530 ("""DO I=NCTAMPS+1,NLOOPAMPS 1531 IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN 1532 WRITE(*,*) '##W03 WARNING Contribution ',I 1533 WRITE(*,*) ' is unstable for helicity ',H 1534 ENDIF 1535 C IF(.NOT.%(proc_prefix)sISZERO(ABS(AMPL(2,I))+ABS(AMPL(3,I)),REF,-1,H)) THEN 1536 C WRITE(*,*) '##W04 WARNING Contribution ',I,' for helicity ',H,' has a contribution to the poles.' 1537 C WRITE(*,*) 'Finite contribution = ',AMPL(1,I) 1538 C WRITE(*,*) 'single pole contribution = ',AMPL(2,I) 1539 C WRITE(*,*) 'double pole contribution = ',AMPL(3,I) 1540 C ENDIF 1541 ENDDO 1542 1227 CONTINUE 1543 HELPICKED=HELPICKED_BU""")%replace_dict 1544 replace_dict['loop_helas_calls']="" 1545 replace_dict['nctamps_or_nloopamps']='nloopamps' 1546 replace_dict['nbornamps_or_nloopamps']='nloopamps' 1547 replace_dict['squaring']=\ 1548 """ANS(1)=ANS(1)+DBLE(CFTOT*AMPL(1,I)*DCONJG(AMPL(1,J))) 1549 IF (J.EQ.1) THEN 1550 ANS(2)=ANS(2)+DBLE(CFTOT*AMPL(2,I))+DIMAG(CFTOT*AMPL(2,I)) 1551 ANS(3)=ANS(3)+DBLE(CFTOT*AMPL(3,I))+DIMAG(CFTOT*AMPL(3,I)) 1552 ENDIF""" 1553 else: 1554 replace_dict['compute_born']=\ 1555 """C Compute the born, for a specific helicity if asked so. 
1556 call %(proc_prefix)ssmatrixhel(P_USER,USERHEL,ANS(0)) 1557 """%matrix_element.rep_dict 1558 replace_dict['set_reference']=\ 1559 """C We chose to use the born evaluation for the reference 1560 call %(proc_prefix)ssmatrix(p,ref)"""%matrix_element.rep_dict 1561 replace_dict['loop_induced_helas_calls'] = "" 1562 replace_dict['loop_induced_finalize'] = "" 1563 replace_dict['loop_induced_setup'] = "" 1564 replace_dict['nctamps_or_nloopamps']='nctamps' 1565 replace_dict['nbornamps_or_nloopamps']='nbornamps' 1566 replace_dict['squaring']='\n'.join(['DO K=1,3', 1567 'ANS(K)=ANS(K)+2.0d0*DBLE(CFTOT*AMPL(K,I)*DCONJG(AMP(J,H)))', 1568 'ENDDO']) 1569 1570 # Write a dummy nsquaredSO.inc which is used in the default 1571 # loop_matrix.f code (even though it does not support split orders evals) 1572 # just to comply with the syntax expected from the external code using MadLoop. 1573 writers.FortranWriter('nsquaredSO.inc').writelines( 1574 """INTEGER NSQUAREDSO 1575 PARAMETER (NSQUAREDSO=0)""") 1576 1577 # Actualize results from the loops computed. Only necessary for 1578 # processes with a born. 1579 actualize_ans=[] 1580 if matrix_element.get('processes')[0].get('has_born'): 1581 actualize_ans.append("DO I=NCTAMPS+1,NLOOPAMPS") 1582 actualize_ans.extend("ANS(%d)=ANS(%d)+AMPL(%d,I)"%(i,i,i) for i \ 1583 in range(1,4)) 1584 actualize_ans.append(\ 1585 "IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN") 1586 actualize_ans.append(\ 1587 "WRITE(*,*) '##W03 WARNING Contribution ',I,' is unstable.'") 1588 actualize_ans.extend(["ENDIF","ENDDO"]) 1589 replace_dict['actualize_ans']='\n'.join(actualize_ans) 1590 else: 1591 replace_dict['actualize_ans']=\ 1592 ("""C We add five powers to the reference value to loosen a bit the vanishing pole check. 1593 C IF(.NOT.(CHECKPHASE.OR.(.NOT.HELDOUBLECHECKED)).AND..NOT.%(proc_prefix)sISZERO(ABS(ANS(2))+ABS(ANS(3)),ABS(ANS(1))*(10.0d0**5),-1,H)) THEN 1594 C WRITE(*,*) '##W05 WARNING Found a PS point with a contribution to the single pole.' 
1595 C WRITE(*,*) 'Finite contribution = ',ANS(1) 1596 C WRITE(*,*) 'single pole contribution = ',ANS(2) 1597 C WRITE(*,*) 'double pole contribution = ',ANS(3) 1598 C ENDIF""")%replace_dict 1599 1600 # Write out the color matrix 1601 (CMNum,CMDenom) = self.get_color_matrix(matrix_element) 1602 CMWriter=open(pjoin('..','MadLoop5_resources', 1603 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w') 1604 for ColorLine in CMNum: 1605 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 1606 CMWriter.close() 1607 CMWriter=open(pjoin('..','MadLoop5_resources', 1608 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w') 1609 for ColorLine in CMDenom: 1610 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 1611 CMWriter.close() 1612 1613 # Write out the helicity configurations 1614 HelConfigs=matrix_element.get_helicity_matrix() 1615 HelConfigWriter=open(pjoin('..','MadLoop5_resources', 1616 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w') 1617 for HelConfig in HelConfigs: 1618 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n') 1619 HelConfigWriter.close() 1620 1621 # Extract helas calls 1622 loop_amp_helas_calls = fortran_model.get_loop_amp_helas_calls(\ 1623 matrix_element) 1624 # The proc_prefix must be replaced 1625 loop_amp_helas_calls = [lc % matrix_element.rep_dict 1626 for lc in loop_amp_helas_calls] 1627 1628 born_ct_helas_calls, UVCT_helas_calls = \ 1629 fortran_model.get_born_ct_helas_calls(matrix_element) 1630 # In the default output, we do not need to separate these two kind of 1631 # contributions 1632 born_ct_helas_calls = born_ct_helas_calls + UVCT_helas_calls 1633 file = open(os.path.join(self.template_dir,\ 1634 1635 'loop_matrix_standalone.inc')).read() 1636 1637 if matrix_element.get('processes')[0].get('has_born'): 1638 toBeRepaced='loop_helas_calls' 1639 else: 1640 toBeRepaced='loop_induced_helas_calls' 1641 1642 # Decide here wether we need to split the loop_matrix.f file or not. 1643 if (not noSplit and (len(matrix_element.get_all_amplitudes())>1000)): 1644 file=self.split_HELASCALLS(writer,replace_dict,\ 1645 'helas_calls_split.inc',file,born_ct_helas_calls,\ 1646 'born_ct_helas_calls','helas_calls_ampb') 1647 file=self.split_HELASCALLS(writer,replace_dict,\ 1648 'helas_calls_split.inc',file,loop_amp_helas_calls,\ 1649 toBeRepaced,'helas_calls_ampl') 1650 else: 1651 replace_dict['born_ct_helas_calls']='\n'.join(born_ct_helas_calls) 1652 replace_dict[toBeRepaced]='\n'.join(loop_amp_helas_calls) 1653 1654 file = file % replace_dict 1655 1656 loop_calls_finder = re.compile(r'^\s*CALL\S*LOOP\S*') 1657 n_loop_calls = len(filter(lambda call: 1658 not loop_calls_finder.match(call) is None, loop_amp_helas_calls)) 1659 if writer: 1660 # Write the file 1661 writer.writelines(file) 1662 return n_loop_calls 1663 else: 1664 # Return it to be written along with the others 1665 return n_loop_calls, file
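# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the generated module): the template-filling
# pattern used throughout this exporter.  A Fortran template containing
# %(key)s / %(key)d placeholders is read in and filled with the replacement
# dictionary built above; the template text here is a made-up stand-in for
# files such as loop_matrix_standalone.inc.
_template = """\
C  Overall factors
%(den_factor_line)s
      HEL_AVG_FACTOR = %(hel_avg_factor)d
"""
_replace_dict = {'den_factor_line': '      DATA IDEN/36/',
                 'hel_avg_factor': 4}
_filled = _template % _replace_dict
# _filled is now plain Fortran text ready to be passed to a FortranWriter.
# ------------------------------------------------------------------------------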
1666
1667 - def write_bornmatrix(self, writer, matrix_element, fortran_model):
 1668          """Create the born_matrix.f file for the born process as for a standard
 1669          tree-level computation."""
 1670 
 1671          if not matrix_element.get('processes') or \
 1672                 not matrix_element.get('diagrams'):
 1673              return 0
 1674 
 1675          if not isinstance(writer, writers.FortranWriter):
 1676              raise writers.FortranWriter.FortranWriterError(\
 1677                  "writer not FortranWriter")
 1678 
 1679          # For now, we can use the exact same treatment as for tree-level
 1680          # computations by redefining here a regular HelasMatrixElement for the
 1681          # born process.
 1682          # It is important to make a deepcopy, as we don't want any possible
 1683          # treatment on the objects of the bornME to have side effects on
 1684          # the content of the LoopHelasMatrixElement object.
 1685          bornME = helas_objects.HelasMatrixElement()
 1686          for prop in bornME.keys():
 1687              bornME.set(prop,copy.deepcopy(matrix_element.get(prop)))
 1688          bornME.set('base_amplitude',None,force=True)
 1689          bornME.set('diagrams',copy.deepcopy(\
 1690                                          matrix_element.get_born_diagrams()))
 1691          bornME.set('color_basis',copy.deepcopy(\
 1692                                  matrix_element.get('born_color_basis')))
 1693          bornME.set('color_matrix',copy.deepcopy(\
 1694                      color_amp.ColorMatrix(bornME.get('color_basis'))))
 1695          # This decides whether or not to reuse old wavefunctions to store new
 1696          # ones (provided they are not used further in the code).
 1697          bornME.optimization = True
 1698          return super(LoopProcessExporterFortranSA,self).write_matrix_element_v4(
 1699                            writer, bornME, fortran_model,
 1700                            proc_prefix=matrix_element.rep_dict['proc_prefix'])
1701
1702 - def write_born_amps_and_wfs(self, writer, matrix_element, fortran_model, 1703 noSplit=False):
1704 """ Writes out the code for the subroutine MP_BORN_AMPS_AND_WFS which 1705 computes just the external wavefunction and born amplitudes in 1706 multiple precision. """ 1707 1708 if not matrix_element.get('processes') or \ 1709 not matrix_element.get('diagrams'): 1710 return 0 1711 1712 replace_dict = copy.copy(matrix_element.rep_dict) 1713 1714 # For the wavefunction copy, check what suffix is needed for the W array 1715 if matrix_element.get('processes')[0].get('has_born'): 1716 replace_dict['h_w_suffix']=',H' 1717 else: 1718 replace_dict['h_w_suffix']='' 1719 1720 # Extract helas calls 1721 born_amps_and_wfs_calls , uvct_amp_calls = \ 1722 fortran_model.get_born_ct_helas_calls(matrix_element, include_CT=True) 1723 # In the default output, these two kind of contributions do not need to 1724 # be differentiated 1725 born_amps_and_wfs_calls = born_amps_and_wfs_calls + uvct_amp_calls 1726 1727 # Turn these HELAS calls to the multiple-precision version of the HELAS 1728 # subroutines. 1729 self.turn_to_mp_calls(born_amps_and_wfs_calls) 1730 1731 file = open(os.path.join(self.template_dir,\ 1732 'mp_born_amps_and_wfs.inc')).read() 1733 # Decide here wether we need to split the loop_matrix.f file or not. 1734 if (not noSplit and (len(matrix_element.get_all_amplitudes())>2000)): 1735 file=self.split_HELASCALLS(writer,replace_dict,\ 1736 'mp_helas_calls_split.inc',file,\ 1737 born_amps_and_wfs_calls,'born_amps_and_wfs_calls',\ 1738 'mp_helas_calls') 1739 else: 1740 replace_dict['born_amps_and_wfs_calls']=\ 1741 '\n'.join(born_amps_and_wfs_calls) 1742 1743 file = file % replace_dict 1744 if writer: 1745 # Write the file 1746 writer.writelines(file) 1747 else: 1748 # Return it to be written along with the others 1749 return file 1750 1751 #=============================================================================== 1752 # LoopProcessOptimizedExporterFortranSA 1753 #=============================================================================== 1754
1755 -class LoopProcessOptimizedExporterFortranSA(LoopProcessExporterFortranSA):
 1756      """Class to take care of exporting a set of loop matrix elements in the
 1757      Fortran format which exploits the Pozzorini method of representing
 1758      the loop numerators as polynomials to render their evaluation faster."""
 1759 
 1760      template_dir=os.path.join(_file_path,'iolibs/template_files/loop_optimized')
 1761      # The option below controls whether one wants to group together in a single
 1762      # CutTools/TIR call the loops sharing the same denominator structure
 1763      forbid_loop_grouping = False
 1764 
 1765      # List of potential TIR libraries one may want to link to.
 1766      # Golem and Samurai will typically get obtained from gosam_contrib
 1767      # which might also contain a version of ninja. We must therefore
 1768      # make sure that ninja appears first in the list of -L flags because
 1769      # it is the tool for which the user is most likely to be
 1770      # using a standalone version independent of gosam_contrib
 1771      all_tir=['pjfry','iregi','ninja','golem','samurai','collier']
 1772 
1773 - def __init__(self, dir_path = "", opt=None):
1774 """Initiate the LoopProcessOptimizedExporterFortranSA with directory 1775 information on where to find all the loop-related source files, 1776 like CutTools and TIR""" 1777 1778 super(LoopProcessOptimizedExporterFortranSA,self).__init__(dir_path, opt) 1779 1780 # TIR available ones 1781 self.tir_available_dict={'pjfry':True,'iregi':True,'golem':True, 1782 'samurai':True,'ninja':True,'collier':True} 1783 1784 for tir in self.all_tir: 1785 tir_dir="%s_dir"%tir 1786 if tir_dir in self.opt and not self.opt[tir_dir] is None: 1787 # Make sure to defer the 'local path' to the current MG5aMC root. 1788 tir_path = self.opt[tir_dir].strip() 1789 if tir_path.startswith('.'): 1790 tir_path = os.path.abspath(pjoin(MG5DIR,tir_path)) 1791 setattr(self,tir_dir,tir_path) 1792 else: 1793 setattr(self,tir_dir,'')
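# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the generated module): the path
# normalization applied above to the '<tir>_dir' options.  MG5DIR is the
# MG5_aMC root directory; the example value is hypothetical.
import os

def _resolve_tir_path(tir_path, mg5_root):
    """Turn a path given relative to the MG5_aMC root into an absolute one."""
    tir_path = tir_path.strip()
    if tir_path.startswith('.'):
        tir_path = os.path.abspath(os.path.join(mg5_root, tir_path))
    return tir_path

# _resolve_tir_path('./HEPTools/collier', '/home/user/MG5_aMC')
#   -> '/home/user/MG5_aMC/HEPTools/collier'
# ------------------------------------------------------------------------------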
1794
1795 - def copy_template(self, model):
1796 """Additional actions needed to setup the Template. 1797 """ 1798 1799 super(LoopProcessOptimizedExporterFortranSA, self).copy_template(model) 1800 1801 self.loop_optimized_additional_template_setup()
1802
1803 - def get_context(self,matrix_element, **opts):
 1804          """ Additional contextual information which needs to be created for
 1805          the optimized output."""
 1806 
 1807          context = LoopProcessExporterFortranSA.get_context(self, matrix_element,
 1808                                                                          **opts)
 1809 
 1810          # Check whether the selected Ninja installation supports quadruple precision
 1811          try:
 1812              context['ninja_supports_quad_prec'] = \
 1813                      misc.get_ninja_quad_prec_support(getattr(self,'ninja_dir'))
 1814          except AttributeError:
 1815              context['ninja_supports_quad_prec'] = False
 1816 
 1817          for tir in self.all_tir:
 1818              context['%s_available'%tir]=self.tir_available_dict[tir]
 1819              # safety check
 1820              if tir not in ['golem','pjfry','iregi','samurai','ninja','collier']:
 1821                  raise MadGraph5Error,"%s is not a TIR currently interfaced."%tir
 1822 
 1823          return context
 1824 
 1825 -    def loop_optimized_additional_template_setup(self):
1826 """ Perform additional actions specific for this class when setting 1827 up the template with the copy_template function.""" 1828 1829 # We must link the TIR to the Library folder of the active Template 1830 link_tir_libs=[] 1831 tir_libs=[] 1832 tir_include=[] 1833 1834 for tir in self.all_tir: 1835 tir_dir="%s_dir"%tir 1836 libpath=getattr(self,tir_dir) 1837 libname="lib%s.a"%tir 1838 tir_name=tir 1839 libpath = self.link_TIR(os.path.join(self.dir_path, 'lib'), 1840 libpath,libname,tir_name=tir_name) 1841 if libpath != "": 1842 if tir in ['ninja','pjfry','golem','samurai','collier']: 1843 # It is cleaner to use the original location of the libraries 1844 link_tir_libs.append('-L%s/ -l%s'%(libpath,tir)) 1845 tir_libs.append('%s/lib%s.$(libext)'%(libpath,tir)) 1846 # For Ninja, we must also link against OneLoop. 1847 if tir in ['ninja']: 1848 if not any(os.path.isfile(pjoin(libpath,'libavh_olo.%s'%ext)) 1849 for ext in ['a','dylib','so']): 1850 raise MadGraph5Error( 1851 "The OneLOop library 'libavh_olo.(a|dylib|so)' could no be found in path '%s'. Please place a symlink to it there."%libpath) 1852 link_tir_libs.append('-L%s/ -l%s'%(libpath,'avh_olo')) 1853 tir_libs.append('%s/lib%s.$(libext)'%(libpath,'avh_olo')) 1854 if tir in ['ninja','golem', 'samurai','collier']: 1855 trgt_path = pjoin(os.path.dirname(libpath),'include') 1856 if os.path.isdir(trgt_path): 1857 to_include = misc.find_includes_path(trgt_path, 1858 self.include_names[tir]) 1859 else: 1860 to_include = None 1861 # Special possible location for collier 1862 if to_include is None and tir=='collier': 1863 to_include = misc.find_includes_path( 1864 pjoin(libpath,'modules'),self.include_names[tir]) 1865 if to_include is None: 1866 logger.error( 1867 'Could not find the include directory for %s, looking in %s.\n' % (tir, str(trgt_path))+ 1868 'Generation carries on but you will need to edit the include path by hand in the makefiles.') 1869 to_include = '<Not_found_define_it_yourself>' 1870 tir_include.append('-I %s'%str(to_include)) 1871 # To be able to easily compile a MadLoop library using 1872 # makefiles built outside of the MG5_aMC framework 1873 # (such as what is done with the Sherpa interface), we 1874 # place here an easy handle on the golem includes 1875 name_map = {'golem':'golem95','samurai':'samurai', 1876 'ninja':'ninja','collier':'collier'} 1877 ln(to_include, starting_dir=pjoin(self.dir_path,'lib'), 1878 name='%s_include'%name_map[tir],abspath=True) 1879 ln(libpath, starting_dir=pjoin(self.dir_path,'lib'), 1880 name='%s_lib'%name_map[tir],abspath=True) 1881 else : 1882 link_tir_libs.append('-l%s'%tir) 1883 tir_libs.append('$(LIBDIR)lib%s.$(libext)'%tir) 1884 1885 MadLoop_makefile_definitions = pjoin(self.dir_path,'SubProcesses', 1886 'MadLoop_makefile_definitions') 1887 if os.path.isfile(MadLoop_makefile_definitions): 1888 os.remove(MadLoop_makefile_definitions) 1889 1890 calls = self.write_loop_makefile_definitions( 1891 writers.MakefileWriter(MadLoop_makefile_definitions), 1892 link_tir_libs,tir_libs, tir_include=tir_include) 1893 1894 # Finally overwrite MadLoopCommons.f now that we know the availibility of 1895 # COLLIER. 
1896 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone', 1897 "SubProcesses","MadLoopCommons.inc")).read() 1898 writer = writers.FortranWriter(os.path.join(self.dir_path, 1899 "SubProcesses","MadLoopCommons.f")) 1900 writer.writelines(MadLoopCommon%{ 1901 'print_banner_commands':self.MadLoop_banner}, context={ 1902 'collier_available':self.tir_available_dict['collier']}) 1903 writer.close()
1904 1916 1917 2045
2046 - def set_group_loops(self, matrix_element):
2047 """ Decides whether we must group loops or not for this matrix element""" 2048 2049 # Decide if loops sharing same denominator structures have to be grouped 2050 # together or not. 2051 if self.forbid_loop_grouping: 2052 self.group_loops = False 2053 else: 2054 self.group_loops = (not self.get_context(matrix_element)['ComputeColorFlows'])\ 2055 and matrix_element.get('processes')[0].get('has_born') 2056 2057 return self.group_loops
2058
2059 - def finalize(self, matrix_element, cmdhistory, MG5options, outputflag):
2060 """create the global information for loops""" 2061 2062 super(LoopProcessOptimizedExporterFortranSA,self).finalize(matrix_element, 2063 cmdhistory, MG5options, outputflag) 2064 self.write_global_specs(matrix_element)
2065 2066 2067
2068 - def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model, 2069 group_number = None, proc_id = None, config_map = None):
2070 """ Writes loop_matrix.f, CT_interface.f,TIR_interface.f,GOLEM_inteface.f 2071 and loop_num.f only but with the optimized FortranModel. 2072 The arguments group_number and proc_id are just for the LoopInduced 2073 output with MadEvent and only used in get_ME_identifier.""" 2074 2075 # Warn the user that the 'matrix' output where all relevant code is 2076 # put together in a single file is not supported in this loop output. 2077 if writer: 2078 raise MadGraph5Error, 'Matrix output mode no longer supported.' 2079 2080 if not isinstance(fortran_model,\ 2081 helas_call_writers.FortranUFOHelasCallWriter): 2082 raise MadGraph5Error, 'The optimized loop fortran output can only'+\ 2083 ' work with a UFO Fortran model' 2084 OptimizedFortranModel=\ 2085 helas_call_writers.FortranUFOHelasCallWriterOptimized(\ 2086 fortran_model.get('model'),False) 2087 2088 2089 if not matrix_element.get('processes')[0].get('has_born') and \ 2090 not self.compute_color_flows: 2091 logger.debug("Color flows will be employed despite the option"+\ 2092 " 'loop_color_flows' being set to False because it is necessary"+\ 2093 " for optimizations.") 2094 2095 # Compute the analytical information of the loop wavefunctions in the 2096 # loop helas matrix elements using the cached aloha model to reuse 2097 # as much as possible the aloha computations already performed for 2098 # writing out the aloha fortran subroutines. 2099 matrix_element.compute_all_analytic_information( 2100 self.get_aloha_model(matrix_element.get('processes')[0].get('model'))) 2101 2102 self.set_group_loops(matrix_element) 2103 2104 # Initialize a general replacement dictionary with entries common to 2105 # many files generated here. 2106 matrix_element.rep_dict = LoopProcessExporterFortranSA.\ 2107 generate_general_replace_dict(self, matrix_element, 2108 group_number = group_number, proc_id = proc_id) 2109 2110 # and those specific to the optimized output 2111 self.set_optimized_output_specific_replace_dict_entries(matrix_element) 2112 2113 # Create the necessary files for the loop matrix element subroutine 2114 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w') 2115 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix']) 2116 proc_prefix_writer.close() 2117 2118 filename = 'loop_matrix.f' 2119 calls = self.write_loopmatrix(writers.FortranWriter(filename), 2120 matrix_element, 2121 OptimizedFortranModel) 2122 2123 filename = 'check_sa.f' 2124 self.write_check_sa(writers.FortranWriter(filename),matrix_element) 2125 2126 filename = 'polynomial.f' 2127 calls = self.write_polynomial_subroutines( 2128 writers.FortranWriter(filename), 2129 matrix_element) 2130 2131 filename = 'improve_ps.f' 2132 calls = self.write_improve_ps(writers.FortranWriter(filename), 2133 matrix_element) 2134 2135 filename = 'CT_interface.f' 2136 self.write_CT_interface(writers.FortranWriter(filename),\ 2137 matrix_element) 2138 2139 filename = 'TIR_interface.f' 2140 self.write_TIR_interface(writers.FortranWriter(filename), 2141 matrix_element) 2142 2143 if 'golem' in self.tir_available_dict and self.tir_available_dict['golem']: 2144 filename = 'GOLEM_interface.f' 2145 self.write_GOLEM_interface(writers.FortranWriter(filename), 2146 matrix_element) 2147 2148 if 'collier' in self.tir_available_dict and self.tir_available_dict['collier']: 2149 filename = 'COLLIER_interface.f' 2150 self.write_COLLIER_interface(writers.FortranWriter(filename), 2151 matrix_element) 2152 2153 filename = 'loop_num.f' 2154 self.write_loop_num(writers.FortranWriter(filename),\ 2155 
matrix_element,OptimizedFortranModel) 2156 2157 filename = 'mp_compute_loop_coefs.f' 2158 self.write_mp_compute_loop_coefs(writers.FortranWriter(filename),\ 2159 matrix_element,OptimizedFortranModel) 2160 2161 if self.get_context(matrix_element)['ComputeColorFlows']: 2162 filename = 'compute_color_flows.f' 2163 self.write_compute_color_flows(writers.FortranWriter(filename), 2164 matrix_element, config_map = config_map) 2165 2166 # Extract number of external particles 2167 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 2168 filename = 'nexternal.inc' 2169 self.write_nexternal_file(writers.FortranWriter(filename), 2170 nexternal, ninitial) 2171 2172 # Write general process information 2173 filename = 'process_info.inc' 2174 self.write_process_info_file(writers.FortranWriter(filename), 2175 matrix_element) 2176 2177 if self.get_context(matrix_element)['TIRCaching']: 2178 filename = 'tir_cache_size.inc' 2179 self.write_tir_cache_size_include(writers.FortranWriter(filename)) 2180 2181 return calls
2182
2183 - def set_optimized_output_specific_replace_dict_entries(self, matrix_element):
 2184          """ Specify the entries of the replacement dictionary which are specific
 2185          to the optimized output and only relevant to it (the more general entries
 2186          are set in the mother class LoopProcessExporterFortranSA)."""
 2187 
 2188          max_loop_rank=matrix_element.get_max_loop_rank()
 2189          matrix_element.rep_dict['maxrank']=max_loop_rank
 2190          matrix_element.rep_dict['loop_max_coefs']=\
 2191              q_polynomial.get_number_of_coefs_for_rank(max_loop_rank)
 2192          max_loop_vertex_rank=matrix_element.get_max_loop_vertex_rank()
 2193          matrix_element.rep_dict['vertex_max_coefs']=\
 2194              q_polynomial.get_number_of_coefs_for_rank(max_loop_vertex_rank)
 2195 
 2196          matrix_element.rep_dict['nloopwavefuncs']=\
 2197                                matrix_element.get_number_of_loop_wavefunctions()
 2198          max_spin=matrix_element.get_max_loop_particle_spin()
 2199 
 2200          matrix_element.rep_dict['max_lwf_size']= 4 if max_spin <=3 else 16
 2201          matrix_element.rep_dict['nloops']=len(\
 2202                  [1 for ldiag in matrix_element.get_loop_diagrams() for \
 2203                                          lamp in ldiag.get_loop_amplitudes()])
 2204 
 2205          if self.set_group_loops(matrix_element):
 2206              matrix_element.rep_dict['nloop_groups']=\
 2207                                      len(matrix_element.get('loop_groups'))
 2208          else:
 2209              matrix_element.rep_dict['nloop_groups']=\
 2210                                      matrix_element.rep_dict['nloops']
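# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the generated module): the counting behind
# 'loop_max_coefs' and 'vertex_max_coefs'.  Assuming the standard counting for
# a polynomial in the four loop-momentum components, a maximal rank R gives
# sum_{r=0..R} C(r+3,3) = C(R+4,4) independent coefficients, which is what
# q_polynomial.get_number_of_coefs_for_rank is expected to return.
def _n_coefs_for_rank(max_rank, n_dim=4):
    from math import factorial
    def binom(n, k):
        return factorial(n) // (factorial(k) * factorial(n - k))
    return sum(binom(r + n_dim - 1, n_dim - 1) for r in range(max_rank + 1))

# _n_coefs_for_rank(1) -> 5, _n_coefs_for_rank(2) -> 15, _n_coefs_for_rank(3) -> 35
# ------------------------------------------------------------------------------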
2211
2212 - def write_loop_num(self, writer, matrix_element,fortran_model):
2213 """ Create the file containing the core subroutine called by CutTools 2214 which contains the Helas calls building the loop""" 2215 2216 replace_dict=copy.copy(matrix_element.rep_dict) 2217 2218 file = open(os.path.join(self.template_dir,'loop_num.inc')).read() 2219 file = file % replace_dict 2220 writer.writelines(file,context=self.get_context(matrix_element))
2221
2222 - def write_CT_interface(self, writer, matrix_element):
2223 """ We can re-use the mother one for the loop optimized output.""" 2224 LoopProcessExporterFortranSA.write_CT_interface(\ 2225 self, writer, matrix_element,optimized_output=True)
2226
2227 - def write_TIR_interface(self, writer, matrix_element):
2228 """ Create the file TIR_interface.f which does NOT contain the subroutine 2229 defining the loop HELAS-like calls along with the general interfacing 2230 subroutine. """ 2231 2232 # First write TIR_interface which interfaces MG5 with TIR. 2233 replace_dict=copy.copy(matrix_element.rep_dict) 2234 2235 file = open(os.path.join(self.template_dir,'TIR_interface.inc')).read() 2236 2237 # Check which loops have an Higgs effective vertex so as to correctly 2238 # implement CutTools limitation 2239 loop_groups = matrix_element.get('loop_groups') 2240 has_HEFT_vertex = [False]*len(loop_groups) 2241 for i, (denom_structure, loop_amp_list) in enumerate(loop_groups): 2242 for lamp in loop_amp_list: 2243 final_lwf = lamp.get_final_loop_wavefunction() 2244 while not final_lwf is None: 2245 # We define here an HEFT vertex as any vertex built up from 2246 # only massless vectors and massive scalars (at least one of each) 2247 # We ask for massive scalars in part to remove the gluon ghost false positive. 2248 scalars = len([1 for wf in final_lwf.get('mothers') if 2249 wf.get('spin')==1 and wf.get('mass')!='ZERO']) 2250 vectors = len([1 for wf in final_lwf.get('mothers') if 2251 wf.get('spin')==3 and wf.get('mass')=='ZERO']) 2252 if scalars>=1 and vectors>=1 and \ 2253 scalars+vectors == len(final_lwf.get('mothers')): 2254 has_HEFT_vertex[i] = True 2255 break 2256 final_lwf = final_lwf.get_loop_mother() 2257 else: 2258 continue 2259 break 2260 2261 has_HEFT_list = [] 2262 chunk_size = 9 2263 for k in xrange(0, len(has_HEFT_vertex), chunk_size): 2264 has_HEFT_list.append("DATA (HAS_AN_HEFT_VERTEX(I),I=%6r,%6r) /%s/" % \ 2265 (k + 1, min(k + chunk_size, len(has_HEFT_vertex)), 2266 ','.join(['.TRUE.' if l else '.FALSE.' for l in 2267 has_HEFT_vertex[k:k + chunk_size]]))) 2268 replace_dict['has_HEFT_list'] = '\n'.join(has_HEFT_list) 2269 2270 file = file % replace_dict 2271 2272 FPR = q_polynomial.FortranPolynomialRoutines( 2273 replace_dict['maxrank'],coef_format=replace_dict['complex_dp_format'],\ 2274 sub_prefix=replace_dict['proc_prefix']) 2275 if self.tir_available_dict['pjfry']: 2276 file += '\n\n'+FPR.write_pjfry_mapping() 2277 if self.tir_available_dict['iregi']: 2278 file += '\n\n'+FPR.write_iregi_mapping() 2279 2280 if writer: 2281 writer.writelines(file,context=self.get_context(matrix_element)) 2282 else: 2283 return file
2284
2285 - def write_COLLIER_interface(self, writer, matrix_element):
 2286          """ Create the file COLLIER_interface.f"""
 2287 
 2288          # First write COLLIER_interface which interfaces MG5 with COLLIER.
 2289          replace_dict=copy.copy(matrix_element.rep_dict)
 2290 
 2291          file = open(os.path.join(self.template_dir,'COLLIER_interface.inc')).read()
 2292 
 2293          FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\
 2294                         coef_format=replace_dict['complex_dp_format'],\
 2295                         sub_prefix=replace_dict['proc_prefix'])
 2296          map_definition = []
 2297          collier_map = FPR.get_COLLIER_mapping()
 2298 
 2299          chunk_size = 10
 2300          for map_name, indices_list in \
 2301                       [('COEFMAP_ZERO',[c[0] for c in collier_map]),
 2302                        ('COEFMAP_ONE',[c[1] for c in collier_map]),
 2303                        ('COEFMAP_TWO',[c[2] for c in collier_map]),
 2304                        ('COEFMAP_THREE',[c[3] for c in collier_map])]:
 2305              for k in xrange(0, len(indices_list), chunk_size):
 2306                  map_definition.append("DATA (%s(I),I=%3r,%3r) /%s/" % \
 2307                    (map_name,k, min(k + chunk_size, len(indices_list))-1,
 2308                     ','.join('%2r'%ind for ind in indices_list[k:k + chunk_size])))
 2309 
 2310          replace_dict['collier_coefmap'] = '\n'.join(map_definition)
 2311 
 2312          file = file % replace_dict
 2313 
 2314          if writer:
 2315              writer.writelines(file,context=self.get_context(matrix_element))
 2316          else:
 2317              return file
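# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the generated module): how a long list of
# values is broken into Fortran DATA statements of a fixed chunk size, as done
# above for HAS_AN_HEFT_VERTEX and for the COLLIER coefficient maps.  The array
# name and values are hypothetical, and 1-based indexing is used here.
def _data_statements(name, values, chunk_size=10):
    lines = []
    for k in range(0, len(values), chunk_size):
        chunk = values[k:k + chunk_size]
        lines.append("DATA (%s(I),I=%3r,%3r) /%s/" %
                     (name, k + 1, k + len(chunk),
                      ','.join('%2r' % v for v in chunk)))
    return lines

# _data_statements('COEFMAP_ZERO', list(range(23)))
#   -> three DATA lines covering entries 1-10, 11-20 and 21-23.
# ------------------------------------------------------------------------------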
2318
2319 - def write_GOLEM_interface(self, writer, matrix_element):
 2320          """ Create the file GOLEM_interface.f which does NOT contain the subroutine
 2321          defining the loop HELAS-like calls along with the general interfacing
 2322          subroutine. """
 2323 
 2324          # First write GOLEM_interface which interfaces MG5 with TIR.
 2325          replace_dict=copy.copy(matrix_element.rep_dict)
 2326 
 2327          # We finalize the TIR result differently depending on whether we used
 2328          # the built-in squaring against the born.
 2329          if not self.get_context(matrix_element)['AmplitudeReduction']:
 2330              replace_dict['loop_induced_sqsoindex']=',SQSOINDEX'
 2331          else:
 2332              replace_dict['loop_induced_sqsoindex']=''
 2333 
 2334          file = open(os.path.join(self.template_dir,'GOLEM_interface.inc')).read()
 2335 
 2336          file = file % replace_dict
 2337 
 2338          FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\
 2339                         coef_format=replace_dict['complex_dp_format'],\
 2340                         sub_prefix=replace_dict['proc_prefix'])
 2341 
 2342          file += '\n\n'+FPR.write_golem95_mapping()
 2343 
 2344          if writer:
 2345              writer.writelines(file,context=self.get_context(matrix_element))
 2346          else:
 2347              return file
2348
2349 - def write_polynomial_subroutines(self,writer,matrix_element):
2350 """ Subroutine to create all the subroutines relevant for handling 2351 the polynomials representing the loop numerator """ 2352 2353 # First create 'loop_max_coefs.inc' 2354 IncWriter=writers.FortranWriter('loop_max_coefs.inc','w') 2355 IncWriter.writelines("""INTEGER LOOPMAXCOEFS 2356 PARAMETER (LOOPMAXCOEFS=%(loop_max_coefs)d)""" 2357 %matrix_element.rep_dict) 2358 2359 # Then coef_specs directly in DHELAS if it does not exist already 2360 # 'coef_specs.inc'. If several processes exported different files there, 2361 # it is fine because the overall maximum value will overwrite it in the 2362 # end 2363 coef_specs_path = pjoin(self.dir_path, 'Source','DHELAS','coef_specs.inc') 2364 if not os.path.isfile(coef_specs_path): 2365 IncWriter=writers.FortranWriter(coef_specs_path,'w') 2366 IncWriter.writelines("""INTEGER MAXLWFSIZE 2367 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d) 2368 INTEGER VERTEXMAXCOEFS 2369 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\ 2370 %matrix_element.rep_dict) 2371 IncWriter.close() 2372 2373 # List of all subroutines to place there 2374 subroutines=[] 2375 2376 # Start from the routine in the template 2377 replace_dict = copy.copy(matrix_element.rep_dict) 2378 2379 dp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read() 2380 mp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read() 2381 # The double precision version of the basic polynomial routines, such as 2382 # create_loop_coefs 2383 replace_dict['complex_format'] = replace_dict['complex_dp_format'] 2384 replace_dict['real_format'] = replace_dict['real_dp_format'] 2385 replace_dict['mp_prefix'] = '' 2386 replace_dict['kind'] = 8 2387 replace_dict['zero_def'] = '0.0d0' 2388 replace_dict['one_def'] = '1.0d0' 2389 dp_routine = dp_routine % replace_dict 2390 # The quadruple precision version of the basic polynomial routines 2391 replace_dict['complex_format'] = replace_dict['complex_mp_format'] 2392 replace_dict['real_format'] = replace_dict['real_mp_format'] 2393 replace_dict['mp_prefix'] = 'MP_' 2394 replace_dict['kind'] = 16 2395 replace_dict['zero_def'] = '0.0e0_16' 2396 replace_dict['one_def'] = '1.0e0_16' 2397 mp_routine = mp_routine % replace_dict 2398 subroutines.append(dp_routine) 2399 subroutines.append(mp_routine) 2400 2401 # Initialize the polynomial routine writer 2402 poly_writer=q_polynomial.FortranPolynomialRoutines( 2403 matrix_element.get_max_loop_rank(), 2404 updater_max_rank = matrix_element.get_max_loop_vertex_rank(), 2405 sub_prefix=replace_dict['proc_prefix'], 2406 proc_prefix=replace_dict['proc_prefix'], 2407 mp_prefix='') 2408 # Write the polynomial constant module common to all 2409 writer.writelines(poly_writer.write_polynomial_constant_module()+'\n') 2410 2411 mp_poly_writer=q_polynomial.FortranPolynomialRoutines( 2412 matrix_element.get_max_loop_rank(), 2413 updater_max_rank = matrix_element.get_max_loop_vertex_rank(), 2414 coef_format='complex*32', sub_prefix='MP_'+replace_dict['proc_prefix'], 2415 proc_prefix=replace_dict['proc_prefix'], mp_prefix='MP_') 2416 # The eval subroutine 2417 subroutines.append(poly_writer.write_polynomial_evaluator()) 2418 subroutines.append(mp_poly_writer.write_polynomial_evaluator()) 2419 # The add coefs subroutine 2420 subroutines.append(poly_writer.write_add_coefs()) 2421 subroutines.append(mp_poly_writer.write_add_coefs()) 2422 # The merging one for creating the loop coefficients 2423 subroutines.append(poly_writer.write_wl_merger()) 2424 subroutines.append(mp_poly_writer.write_wl_merger()) 2425 for wl_update 
                                     in matrix_element.get_used_wl_updates():
 2426              # We pick here the most appropriate way of computing the
 2427              # tensor product depending on the rank of the two tensors.
 2428              # The various choices below came out of a careful comparison of
 2429              # the different methods using the valgrind profiler
 2430              if wl_update[0]==wl_update[1]==1 or wl_update[0]==0 or wl_update[1]==0:
 2431                  # If either rank is 0, or if they are both equal to 1,
 2432                  # then we are better off using the fully expanded polynomial,
 2433                  # and letting the compiler optimize it.
 2434                  subroutines.append(poly_writer.write_expanded_wl_updater(\
 2435                                                    wl_update[0],wl_update[1]))
 2436                  subroutines.append(mp_poly_writer.write_expanded_wl_updater(\
 2437                                                    wl_update[0],wl_update[1]))
 2438              elif wl_update[0] >= wl_update[1]:
 2439                  # If the loop polynomial is the larger one, then we filter and loop
 2440                  # over the vertex coefficients first. The smallest product for
 2441                  # which the routines below could be used is then
 2442                  # loop_rank_2 x vertex_rank_1
 2443                  subroutines.append(poly_writer.write_compact_wl_updater(\
 2444                        wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True))
 2445                  subroutines.append(mp_poly_writer.write_compact_wl_updater(\
 2446                        wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True))
 2447              else:
 2448                  # This happens only when the rank of the updater (vertex coef)
 2449                  # is larger than the one of the loop coef and none of them is
 2450                  # zero. This never happens in renormalizable theories but it
 2451                  # can happen in HEFT or other effective theories. In this
 2452                  # case the typical use of this routine is for the product
 2453                  # loop_rank_1 x vertex_rank_2
 2454                  subroutines.append(poly_writer.write_compact_wl_updater(\
 2455                       wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False))
 2456                  subroutines.append(mp_poly_writer.write_compact_wl_updater(\
 2457                       wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False))
 2458 
 2459          writer.writelines('\n\n'.join(subroutines),
 2460                            context=self.get_context(matrix_element))
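# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the generated module): the selection rule
# described in the comments above, isolated as a small helper.  The string tags
# returned here are hypothetical labels, not names used by
# FortranPolynomialRoutines.
def _updater_choice(loop_rank, vertex_rank):
    if loop_rank == vertex_rank == 1 or loop_rank == 0 or vertex_rank == 0:
        # Fully expanded polynomial, left to the compiler to optimize.
        return 'expanded'
    elif loop_rank >= vertex_rank:
        # The loop polynomial is the larger one: loop over vertex coefficients first.
        return 'compact_vertex_first'
    else:
        # Only possible in effective theories (e.g. HEFT-like vertices).
        return 'compact_loop_first'

# _updater_choice(2, 1) -> 'compact_vertex_first'
# _updater_choice(1, 2) -> 'compact_loop_first'
# ------------------------------------------------------------------------------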
2461
2462 - def write_mp_compute_loop_coefs(self, writer, matrix_element, fortran_model):
2463 """Create the write_mp_compute_loop_coefs.f file.""" 2464 2465 if not matrix_element.get('processes') or \ 2466 not matrix_element.get('diagrams'): 2467 return 0 2468 2469 # Set lowercase/uppercase Fortran code 2470 2471 writers.FortranWriter.downcase = False 2472 2473 replace_dict = copy.copy(matrix_element.rep_dict) 2474 2475 # Extract helas calls 2476 squared_orders = matrix_element.get_squared_order_contribs() 2477 split_orders = matrix_element.get('processes')[0].get('split_orders') 2478 2479 born_ct_helas_calls , uvct_helas_calls = \ 2480 fortran_model.get_born_ct_helas_calls(matrix_element, 2481 squared_orders=squared_orders, split_orders=split_orders) 2482 self.turn_to_mp_calls(born_ct_helas_calls) 2483 self.turn_to_mp_calls(uvct_helas_calls) 2484 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\ 2485 matrix_element,group_loops=self.group_loops, 2486 squared_orders=squared_orders,split_orders=split_orders) 2487 # The proc_prefix must be replaced 2488 coef_construction = [c % matrix_element.rep_dict for c 2489 in coef_construction] 2490 self.turn_to_mp_calls(coef_construction) 2491 self.turn_to_mp_calls(coef_merging) 2492 2493 file = open(os.path.join(self.template_dir,\ 2494 'mp_compute_loop_coefs.inc')).read() 2495 2496 # Setup the contextual environment which is used in the splitting 2497 # functions below 2498 context = self.get_context(matrix_element) 2499 file=self.split_HELASCALLS(writer,replace_dict,\ 2500 'mp_helas_calls_split.inc',file,born_ct_helas_calls,\ 2501 'mp_born_ct_helas_calls','mp_helas_calls_ampb', 2502 required_so_broadcaster = 'MP_CT_REQ_SO_DONE', 2503 continue_label = 2000, 2504 momenta_array_name = 'MP_P', 2505 context=context) 2506 file=self.split_HELASCALLS(writer,replace_dict,\ 2507 'mp_helas_calls_split.inc',file,uvct_helas_calls,\ 2508 'mp_uvct_helas_calls','mp_helas_calls_uvct', 2509 required_so_broadcaster = 'MP_UVCT_REQ_SO_DONE', 2510 continue_label = 3000, 2511 momenta_array_name = 'MP_P', 2512 context=context) 2513 file=self.split_HELASCALLS(writer,replace_dict,\ 2514 'mp_helas_calls_split.inc',file,coef_construction,\ 2515 'mp_coef_construction','mp_coef_construction', 2516 required_so_broadcaster = 'MP_LOOP_REQ_SO_DONE', 2517 continue_label = 4000, 2518 momenta_array_name = 'MP_P', 2519 context=context) 2520 2521 replace_dict['mp_coef_merging']='\n'.join(coef_merging) 2522 2523 file = file % replace_dict 2524 2525 # Write the file 2526 writer.writelines(file,context=context)
2527
2528 - def write_color_matrix_data_file(self, writer, col_matrix):
2529 """Writes out the files (Loop|Born)ColorFlowMatrix.dat corresponding 2530 to the color coefficients for JAMP(L|B)*JAMP(L|B).""" 2531 2532 res = [] 2533 for line in range(len(col_matrix._col_basis1)): 2534 numerators = [] 2535 denominators = [] 2536 for row in range(len(col_matrix._col_basis2)): 2537 coeff = col_matrix.col_matrix_fixed_Nc[(line,row)] 2538 numerators.append('%6r'%coeff[0].numerator) 2539 denominators.append('%6r'%( 2540 coeff[0].denominator*(-1 if coeff[1] else 1))) 2541 res.append(' '.join(numerators)) 2542 res.append(' '.join(denominators)) 2543 2544 res.append('EOF') 2545 2546 writer.writelines('\n'.join(res))
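# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the generated module): the convention used
# above when writing the color-matrix data files.  Each coefficient appears to
# be stored as a (fraction, flag) pair, with the flag folded into the sign of
# the denominator column.  The sample coefficients are hypothetical.
import fractions

def _num_den_columns(coeffs):
    """coeffs: list of (Fraction, bool) pairs for one row of the matrix."""
    numerators = ['%6r' % c[0].numerator for c in coeffs]
    denominators = ['%6r' % (c[0].denominator * (-1 if c[1] else 1))
                    for c in coeffs]
    return ' '.join(numerators), ' '.join(denominators)

# _num_den_columns([(fractions.Fraction(7, 3), False),
#                   (fractions.Fraction(-1, 6), True)])
#   -> ('     7     -1', '     3     -6')
# ------------------------------------------------------------------------------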
2547
2548 - def write_color_flow_coefs_data_file(self, writer, color_amplitudes, 2549 color_basis):
2550 """ Writes the file '(Loop|Born)ColorFlowCoefs.dat using the coefficients 2551 list of the color_amplitudes in the argument of this function.""" 2552 2553 my_cs = color.ColorString() 2554 2555 res = [] 2556 2557 for jamp_number, coeff_list in enumerate(color_amplitudes): 2558 my_cs.from_immutable(sorted(color_basis.keys())[jamp_number]) 2559 # Order the ColorString so that its ordering is canonical. 2560 ordered_cs = color.ColorFactor([my_cs]).full_simplify()[0] 2561 res.append('%d # Coefficient for flow number %d with expr. %s'\ 2562 %(len(coeff_list), jamp_number+1, repr(ordered_cs))) 2563 # A line element is a tuple (numerator, denominator, amplitude_id) 2564 line_element = [] 2565 2566 for (coefficient, amp_number) in coeff_list: 2567 coef = self.cat_coeff(\ 2568 coefficient[0],coefficient[1],coefficient[2],coefficient[3]) 2569 line_element.append((coef[0].numerator, 2570 coef[0].denominator*(-1 if coef[1] else 1),amp_number)) 2571 # Sort them by growing amplitude number 2572 line_element.sort(key=lambda el:el[2]) 2573 2574 for i in range(3): 2575 res.append(' '.join('%6r'%elem[i] for elem in line_element)) 2576 2577 res.append('EOF') 2578 writer.writelines('\n'.join(res))
2579
2580 - def write_compute_color_flows(self, writer, matrix_element, config_map):
2581 """Writes the file compute_color_flows.f which uses the AMPL results 2582 from a common block to project them onto the color flow space so as 2583 to compute the JAMP quantities. For loop induced processes, this file 2584 will also contain a subroutine computing AMPL**2 for madevent 2585 multichanneling.""" 2586 2587 loop_col_amps = matrix_element.get_loop_color_amplitudes() 2588 matrix_element.rep_dict['nLoopFlows'] = len(loop_col_amps) 2589 2590 dat_writer = open(pjoin('..','MadLoop5_resources', 2591 '%(proc_prefix)sLoopColorFlowCoefs.dat' 2592 %matrix_element.rep_dict),'w') 2593 self.write_color_flow_coefs_data_file(dat_writer, 2594 loop_col_amps, matrix_element.get('loop_color_basis')) 2595 dat_writer.close() 2596 2597 dat_writer = open(pjoin('..','MadLoop5_resources', 2598 '%(proc_prefix)sLoopColorFlowMatrix.dat' 2599 %matrix_element.rep_dict),'w') 2600 self.write_color_matrix_data_file(dat_writer, 2601 matrix_element.get('color_matrix')) 2602 dat_writer.close() 2603 2604 if matrix_element.get('processes')[0].get('has_born'): 2605 born_col_amps = matrix_element.get_born_color_amplitudes() 2606 matrix_element.rep_dict['nBornFlows'] = len(born_col_amps) 2607 dat_writer = open(pjoin('..','MadLoop5_resources', 2608 '%(proc_prefix)sBornColorFlowCoefs.dat' 2609 %matrix_element.rep_dict),'w') 2610 self.write_color_flow_coefs_data_file(dat_writer, 2611 born_col_amps, matrix_element.get('born_color_basis')) 2612 dat_writer.close() 2613 2614 dat_writer = open(pjoin('..','MadLoop5_resources', 2615 '%(proc_prefix)sBornColorFlowMatrix.dat' 2616 %matrix_element.rep_dict),'w') 2617 self.write_color_matrix_data_file(dat_writer, 2618 color_amp.ColorMatrix(matrix_element.get('born_color_basis'))) 2619 dat_writer.close() 2620 else: 2621 matrix_element.rep_dict['nBornFlows'] = 0 2622 2623 replace_dict = copy.copy(matrix_element.rep_dict) 2624 2625 # The following variables only have to be defined for the LoopInduced 2626 # output for madevent. 2627 if self.get_context(matrix_element)['MadEventOutput']: 2628 self.get_amp2_lines(matrix_element, replace_dict, config_map) 2629 else: 2630 replace_dict['config_map_definition'] = '' 2631 replace_dict['config_index_map_definition'] = '' 2632 replace_dict['nmultichannels'] = 0 2633 replace_dict['nmultichannel_configs'] = 0 2634 2635 # The nmultichannels entry will be used in the matrix<i> wrappers as 2636 # well, so we add it to the general_replace_dict too. 2637 matrix_element.rep_dict['nmultichannels'] = \ 2638 replace_dict['nmultichannels'] 2639 matrix_element.rep_dict['nmultichannel_configs'] = \ 2640 replace_dict['nmultichannel_configs'] 2641 2642 2643 file = open(os.path.join(self.template_dir,\ 2644 'compute_color_flows.inc')).read()%replace_dict 2645 2646 writer.writelines(file,context=self.get_context(matrix_element))
2647
2648 - def write_global_specs(self, matrix_element_list, output_path=None):
2649 """ From the list of matrix element, or the single matrix element, derive 2650 the global quantities to write in global_coef_specs.inc""" 2651 2652 if isinstance(matrix_element_list, (group_subprocs.SubProcessGroupList, 2653 loop_helas_objects.LoopHelasProcess)): 2654 matrix_element_list = matrix_element_list.get_matrix_elements() 2655 2656 if isinstance(matrix_element_list, list): 2657 me_list = matrix_element_list 2658 else: 2659 me_list = [matrix_element_list] 2660 2661 if output_path is None: 2662 out_path = pjoin(self.dir_path,'SubProcesses','global_specs.inc') 2663 else: 2664 out_path = output_path 2665 2666 open(out_path,'w').write( 2667 """ integer MAXNEXTERNAL 2668 parameter(MAXNEXTERNAL=%d) 2669 integer OVERALLMAXRANK 2670 parameter(OVERALLMAXRANK=%d) 2671 integer NPROCS 2672 parameter(NPROCS=%d)"""%( 2673 max(me.get_nexternal_ninitial()[0] for me in me_list), 2674 max(me.get_max_loop_rank() for me in me_list), 2675 len(me_list)))
2676 2677
2678 - def fix_coef_specs(self, overall_max_lwf_spin, overall_max_loop_vert_rank):
 2679          """ If processes with different maximum loop wavefunction size or
 2680          different maximum loop vertex rank have to be output together, then
 2681          the file 'coef_specs.inc' in the HELAS Source folder must contain the
 2682          overall maximum of these quantities. This is not entirely safe, though,
 2683          and the user has been appropriately warned at the output stage """
 2684 
 2685          # Remove the existing link
 2686          coef_specs_path=os.path.join(self.dir_path,'Source','DHELAS',\
 2687                                                             'coef_specs.inc')
 2688          os.remove(coef_specs_path)
 2689 
 2690          spin_to_wf_size = {1:4,2:4,3:4,4:16,5:16}
 2691          overall_max_lwf_size = spin_to_wf_size[overall_max_lwf_spin]
 2692          overall_max_loop_vert_coefs = q_polynomial.get_number_of_coefs_for_rank(
 2693                                                     overall_max_loop_vert_rank)
 2694          # Replace it by the appropriate value
 2695          IncWriter=writers.FortranWriter(coef_specs_path,'w')
 2696          IncWriter.writelines("""INTEGER MAXLWFSIZE
 2697                             PARAMETER (MAXLWFSIZE=%(max_lwf_size)d)
 2698                             INTEGER VERTEXMAXCOEFS
 2699                             PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\
 2700                             %{'max_lwf_size':overall_max_lwf_size,
 2701                               'vertex_max_coefs':overall_max_loop_vert_coefs})
 2702          IncWriter.close()
2703
2704 - def setup_check_sa_replacement_dictionary(self, matrix_element, \ 2705 split_orders,squared_orders,amps_orders):
2706 """ Sets up the replacement dictionary for the writeout of the steering 2707 file check_sa.f""" 2708 if len(squared_orders)<1: 2709 matrix_element.rep_dict['print_so_loop_results']=\ 2710 "write(*,*) 'No split orders defined.'" 2711 elif len(squared_orders)==1: 2712 matrix_element.rep_dict['set_coupling_target']='' 2713 matrix_element.rep_dict['print_so_loop_results']=\ 2714 "write(*,*) 'All loop contributions are of split orders (%s)'"%( 2715 ' '.join(['%s=%d'%(split_orders[i],squared_orders[0][i]) \ 2716 for i in range(len(split_orders))])) 2717 else: 2718 matrix_element.rep_dict['set_coupling_target']='\n'.join([ 2719 '# Here we leave the default target squared split order to -1, meaning that we'+ 2720 ' aim at computing all individual contributions. You can choose otherwise.', 2721 'call %(proc_prefix)sSET_COUPLINGORDERS_TARGET(-1)'%matrix_element.rep_dict]) 2722 matrix_element.rep_dict['print_so_loop_results'] = '\n'.join([ 2723 '\n'.join(["write(*,*) '%dL) Loop ME for orders (%s) :'"%((j+1),(' '.join( 2724 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]))), 2725 "IF (PREC_FOUND(%d).NE.-1.0d0) THEN"%(j+1), 2726 "write(*,*) ' > accuracy = ',PREC_FOUND(%d)"%(j+1), 2727 "ELSE", 2728 "write(*,*) ' > accuracy = NA'", 2729 "ENDIF", 2730 "write(*,*) ' > finite = ',MATELEM(1,%d)"%(j+1), 2731 "write(*,*) ' > 1eps = ',MATELEM(2,%d)"%(j+1), 2732 "write(*,*) ' > 2eps = ',MATELEM(3,%d)"%(j+1) 2733 ]) for j, so in enumerate(squared_orders)]) 2734 matrix_element.rep_dict['write_so_loop_results'] = '\n'.join( 2735 ["write (69,*) 'Split_Orders_Names %s'"%(' '.join(split_orders))]+ 2736 ['\n'.join([ 2737 "write (69,*) 'Loop_SO_Results %s'"%(' '.join( 2738 ['%d'%so_value for so_value in so])), 2739 "write (69,*) 'SO_Loop ACC ',PREC_FOUND(%d)"%(j+1), 2740 "write (69,*) 'SO_Loop FIN ',MATELEM(1,%d)"%(j+1), 2741 "write (69,*) 'SO_Loop 1EPS ',MATELEM(2,%d)"%(j+1), 2742 "write (69,*) 'SO_Loop 2EPS ',MATELEM(3,%d)"%(j+1), 2743 ]) for j, so in enumerate(squared_orders)]) 2744 2745 # We must reconstruct here the born squared orders. 
2746 squared_born_so_orders = [] 2747 for i, amp_order in enumerate(amps_orders['born_amp_orders']): 2748 for j in range(0,i+1): 2749 key = tuple([ord1 + ord2 for ord1,ord2 in \ 2750 zip(amp_order[0],amps_orders['born_amp_orders'][j][0])]) 2751 if not key in squared_born_so_orders: 2752 squared_born_so_orders.append(key) 2753 if len(squared_born_so_orders)<1: 2754 matrix_element.rep_dict['print_so_born_results'] = '' 2755 elif len(squared_born_so_orders)==1: 2756 matrix_element.rep_dict['print_so_born_results'] = \ 2757 "write(*,*) 'All Born contributions are of split orders (%s)'"%( 2758 ' '.join(['%s=%d'%(split_orders[i],squared_born_so_orders[0][i]) 2759 for i in range(len(split_orders))])) 2760 else: 2761 matrix_element.rep_dict['print_so_born_results'] = '\n'.join([ 2762 "write(*,*) '%dB) Born ME for orders (%s) = ',MATELEM(0,%d)"%(j+1,' '.join( 2763 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]),j+1) 2764 for j, so in enumerate(squared_born_so_orders)]) 2765 matrix_element.rep_dict['write_so_born_results'] = '\n'.join( 2766 ['\n'.join([ 2767 "write (69,*) 'Born_SO_Results %s'"%(' '.join( 2768 ['%d'%so_value for so_value in so])), 2769 "write (69,*) 'SO_Born BORN ',MATELEM(0,%d)"%(j+1), 2770 ]) for j, so in enumerate(squared_born_so_orders)]) 2771 2772 # Add a bottom bar to both print_so_[loop|born]_results 2773 matrix_element.rep_dict['print_so_born_results'] += \ 2774 '\nwrite (*,*) "---------------------------------"' 2775 matrix_element.rep_dict['print_so_loop_results'] += \ 2776 '\nwrite (*,*) "---------------------------------"'
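# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the generated module): the reconstruction of
# the Born squared split orders performed above, i.e. the set of pairwise sums
# of the Born amplitude orders, kept in order of first appearance.  The sample
# orders correspond to a hypothetical process with (QCD, QED) split orders.
def _squared_born_orders(born_amp_orders):
    squared = []
    for i, (orders_i, _) in enumerate(born_amp_orders):
        for j in range(i + 1):
            key = tuple(o1 + o2 for o1, o2 in zip(orders_i, born_amp_orders[j][0]))
            if key not in squared:
                squared.append(key)
    return squared

# Two Born amplitude orders (2, 0) and (0, 2) give the squared combinations
# [(4, 0), (2, 2), (0, 4)].
# ------------------------------------------------------------------------------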
2777
2778 - def write_tir_cache_size_include(self, writer):
 2779          """Write the file 'tir_cache_size.inc' which sets the size of the TIR
 2780          cache that the user wishes to employ and the default value for it.
 2781          This can have an impact on MadLoop speed when using stability checks
 2782          but also impacts in a non-negligible way MadLoop's memory footprint.
 2783          It is therefore important that the user can choose its size."""
 2784 
 2785          # For the standalone optimized output, a size of one is necessary.
 2786          # The MadLoop+MadEvent output sets it to 2 because it can gain a further
 2787          # speed increase with a TIR cache of size 2 due to the structure of the
 2788          # calls to MadLoop there.
 2789          tir_cache_size = "parameter(TIR_CACHE_SIZE=1)"
 2790          writer.writelines(tir_cache_size)
2791
2792 - def write_loopmatrix(self, writer, matrix_element, fortran_model, \ 2793 write_auxiliary_files=True,):
2794 """Create the loop_matrix.f file.""" 2795 2796 if not matrix_element.get('processes') or \ 2797 not matrix_element.get('diagrams'): 2798 return 0 2799 2800 # Set lowercase/uppercase Fortran code 2801 writers.FortranWriter.downcase = False 2802 2803 # Starting off with the treatment of the split_orders since some 2804 # of the information extracted there will come into the 2805 # general_replace_dict. Split orders are abbreviated SO in all the 2806 # keys of the replacement dictionaries. 2807 2808 # Take care of the split_orders 2809 squared_orders, amps_orders = matrix_element.get_split_orders_mapping() 2810 # Creating here a temporary list containing only the information of 2811 # what are the different squared split orders contributing 2812 # (i.e. not using max_contrib_amp_number and max_contrib_ref_amp_number) 2813 sqso_contribs = [sqso[0] for sqso in squared_orders] 2814 split_orders = matrix_element.get('processes')[0].get('split_orders') 2815 # The entries set in the function below are only for check_sa written 2816 # out in write_loop__matrix_element_v4 (it is however placed here because the 2817 # split order information is only available here). 2818 self.setup_check_sa_replacement_dictionary(matrix_element, 2819 split_orders,sqso_contribs,amps_orders) 2820 2821 # Now recast the split order basis for the loop, born and counterterm 2822 # amplitude into one single splitorderbasis. 2823 overall_so_basis = list(set( 2824 [born_so[0] for born_so in amps_orders['born_amp_orders']]+ 2825 [born_so[0] for born_so in amps_orders['loop_amp_orders']])) 2826 # We must re-sort it to make sure it follows an increasing WEIGHT order 2827 order_hierarchy = matrix_element.get('processes')[0]\ 2828 .get('model').get('order_hierarchy') 2829 if set(order_hierarchy.keys()).union(set(split_orders))==\ 2830 set(order_hierarchy.keys()): 2831 overall_so_basis.sort(key= lambda so: 2832 sum([order_hierarchy[split_orders[i]]*order_power for \ 2833 i, order_power in enumerate(so)])) 2834 2835 # Those are additional entries used throughout the different files of 2836 # MadLoop5 2837 matrix_element.rep_dict['split_order_str_list'] = str(split_orders) 2838 matrix_element.rep_dict['nSO'] = len(split_orders) 2839 matrix_element.rep_dict['nSquaredSO'] = len(sqso_contribs) 2840 matrix_element.rep_dict['nAmpSO'] = len(overall_so_basis) 2841 2842 writers.FortranWriter('nsquaredSO.inc').writelines( 2843 """INTEGER NSQUAREDSO 2844 PARAMETER (NSQUAREDSO=%d)"""%matrix_element.rep_dict['nSquaredSO']) 2845 2846 replace_dict = copy.copy(matrix_element.rep_dict) 2847 # Build the general array mapping the split orders indices to their 2848 # definition 2849 replace_dict['ampsplitorders'] = '\n'.join(self.get_split_orders_lines(\ 2850 overall_so_basis,'AMPSPLITORDERS')) 2851 replace_dict['SquaredSO'] = '\n'.join(self.get_split_orders_lines(\ 2852 sqso_contribs,'SQPLITORDERS')) 2853 2854 # Specify what are the squared split orders selected by the proc def. 2855 replace_dict['chosen_so_configs'] = self.set_chosen_SO_index( 2856 matrix_element.get('processes')[0],sqso_contribs) 2857 2858 # Now we build the different arrays storing the split_orders ID of each 2859 # amp. 
2860 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['loop_amp_orders']) 2861 for SO in amps_orders['loop_amp_orders']: 2862 for amp_number in SO[1]: 2863 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1 2864 2865 replace_dict['loopAmpSO'] = '\n'.join(self.format_integer_list( 2866 ampSO_list,'LOOPAMPORDERS')) 2867 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['born_amp_orders']) 2868 for SO in amps_orders['born_amp_orders']: 2869 for amp_number in SO[1]: 2870 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1 2871 replace_dict['BornAmpSO'] = '\n'.join(self.format_integer_list( 2872 ampSO_list,'BORNAMPORDERS')) 2873 2874 # We then go to the TIR setup 2875 # The first entry is the CutTools, we make sure it is available 2876 looplibs_av=['.TRUE.'] 2877 # one should be careful about the order in the following as it must match 2878 # the ordering in MadLoopParamsCard. 2879 for tir_lib in ['pjfry','iregi','golem','samurai','ninja','collier']: 2880 looplibs_av.append('.TRUE.' if tir_lib in self.all_tir and \ 2881 self.tir_available_dict[tir_lib] else '.FALSE.') 2882 replace_dict['data_looplibs_av']=','.join(looplibs_av) 2883 2884 # Helicity offset convention 2885 # For a given helicity, the attached integer 'i' means 2886 # 'i' in ]-inf;-HELOFFSET[ -> Helicity is equal, up to a sign, 2887 # to helicity number abs(i+HELOFFSET) 2888 # 'i' == -HELOFFSET -> Helicity is analytically zero 2889 # 'i' in ]-HELOFFSET,inf[ -> Helicity is contributing with weight 'i'. 2890 # If it is zero, it is skipped. 2891 # Typically, the hel_offset is 10000 2892 replace_dict['hel_offset'] = 10000 2893 2894 # Extract overall denominator 2895 # Averaging initial state color, spin, and identical FS particles 2896 den_factor_line = self.get_den_factor_line(matrix_element) 2897 replace_dict['den_factor_line'] = den_factor_line 2898 2899 # When the user asks for the polarized matrix element we must 2900 # multiply back by the helicity averaging factor 2901 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 2902 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\ 2903 matrix_element.get_beams_hel_avg_factor() 2904 2905 if write_auxiliary_files: 2906 # Write out the color matrix 2907 (CMNum,CMDenom) = self.get_color_matrix(matrix_element) 2908 CMWriter=open(pjoin('..','MadLoop5_resources', 2909 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w') 2910 for ColorLine in CMNum: 2911 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 2912 CMWriter.close() 2913 CMWriter=open(pjoin('..','MadLoop5_resources', 2914 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w') 2915 for ColorLine in CMDenom: 2916 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 2917 CMWriter.close() 2918 2919 # Write out the helicity configurations 2920 HelConfigs=matrix_element.get_helicity_matrix() 2921 HelConfigWriter=open(pjoin('..','MadLoop5_resources', 2922 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w') 2923 for HelConfig in HelConfigs: 2924 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n') 2925 HelConfigWriter.close() 2926 2927 # Extract helas calls 2928 born_ct_helas_calls, uvct_helas_calls = \ 2929 fortran_model.get_born_ct_helas_calls(matrix_element, 2930 squared_orders=squared_orders,split_orders=split_orders) 2931 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\ 2932 matrix_element,group_loops=self.group_loops, 2933 squared_orders=squared_orders,split_orders=split_orders) 
2934 2935 loop_CT_calls = fortran_model.get_loop_CT_calls(matrix_element,\ 2936 group_loops=self.group_loops, 2937 squared_orders=squared_orders, split_orders=split_orders) 2938 # The proc_prefix must be replaced 2939 coef_construction = [c % matrix_element.rep_dict for c 2940 in coef_construction] 2941 loop_CT_calls = [lc % matrix_element.rep_dict for lc in loop_CT_calls] 2942 2943 file = open(os.path.join(self.template_dir,\ 2944 'loop_matrix_standalone.inc')).read() 2945 2946 # Setup the contextual environment which is used in the splitting 2947 # functions below 2948 context = self.get_context(matrix_element) 2949 file=self.split_HELASCALLS(writer,replace_dict,\ 2950 'helas_calls_split.inc',file,born_ct_helas_calls,\ 2951 'born_ct_helas_calls','helas_calls_ampb', 2952 required_so_broadcaster = 'CT_REQ_SO_DONE', 2953 continue_label = 2000, context = context) 2954 file=self.split_HELASCALLS(writer,replace_dict,\ 2955 'helas_calls_split.inc',file,uvct_helas_calls,\ 2956 'uvct_helas_calls','helas_calls_uvct', 2957 required_so_broadcaster = 'UVCT_REQ_SO_DONE', 2958 continue_label = 3000, context=context) 2959 file=self.split_HELASCALLS(writer,replace_dict,\ 2960 'helas_calls_split.inc',file,coef_construction,\ 2961 'coef_construction','coef_construction', 2962 required_so_broadcaster = 'LOOP_REQ_SO_DONE', 2963 continue_label = 4000, context=context) 2964 file=self.split_HELASCALLS(writer,replace_dict,\ 2965 'helas_calls_split.inc',file,loop_CT_calls,\ 2966 'loop_CT_calls','loop_CT_calls', 2967 required_so_broadcaster = 'CTCALL_REQ_SO_DONE', 2968 continue_label = 5000, context=context) 2969 2970 # Add the entries above to the general_replace_dict so that it can be 2971 # used by write_mp_compute_loop_coefs later 2972 matrix_element.rep_dict['loop_CT_calls']=replace_dict['loop_CT_calls'] 2973 matrix_element.rep_dict['born_ct_helas_calls']=replace_dict['born_ct_helas_calls'] 2974 matrix_element.rep_dict['uvct_helas_calls']=replace_dict['uvct_helas_calls'] 2975 matrix_element.rep_dict['coef_construction']=replace_dict['coef_construction'] 2976 2977 replace_dict['coef_merging']='\n'.join(coef_merging) 2978 2979 file = file % replace_dict 2980 number_of_calls = len(filter(lambda call: call.find('CALL LOOP') != 0, \ 2981 loop_CT_calls)) 2982 if writer: 2983 # Write the file 2984 writer.writelines(file,context=context) 2985 return number_of_calls 2986 else: 2987 # Return it to be written along with the others 2988 return number_of_calls, file
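# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the MadGraph5_aMC@NLO sources): the helper
# below only spells out the helicity-offset convention documented in the
# routine above, assuming the usual value HEL_OFFSET = 10000. The name
# 'decode_helicity_flag' is hypothetical.
def decode_helicity_flag(i, hel_offset=10000):
    """Interpret the integer attached to a helicity configuration."""
    if i < -hel_offset:
        # Equal, up to a sign, to the helicity number abs(i + hel_offset)
        return ('mapped', abs(i + hel_offset))
    elif i == -hel_offset:
        return ('analytically_zero', None)
    elif i == 0:
        return ('skipped', None)
    else:
        # Contributes with weight i
        return ('contributing', i)
# e.g. decode_helicity_flag(-10003) -> ('mapped', 3)
#      decode_helicity_flag(5)      -> ('contributing', 5)
# ------------------------------------------------------------------------------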
2989  
2990  #=============================================================================== 
2991  # LoopProcessExporterFortranMatchBox 
2992  #=============================================================================== 
2993 -class LoopProcessExporterFortranMatchBox(LoopProcessOptimizedExporterFortranSA, 2994 export_v4.ProcessExporterFortranMatchBox):
2995      """Class to take care of exporting a set of loop matrix elements in the 
2996      MatchBox Fortran format.""" 
2997  
2998      default_opt = {'clean': False, 'complex_mass':False, 
2999                     'export_format':'madloop_matchbox', 'mp':True, 
3000                     'loop_dir':'', 'cuttools_dir':'', 
3001                     'fortran_compiler':'gfortran', 
3002                     'output_dependencies':'external', 
3003                     'sa_symmetry':True} 
3004  
3005  
3006  
3007 - def get_color_string_lines(self, matrix_element):
3008 """Return the color matrix definition lines for this matrix element. Split 3009 rows in chunks of size n.""" 3010 3011 return export_v4.ProcessExporterFortranMatchBox.get_color_string_lines(matrix_element)
3012 3013
3014 - def get_JAMP_lines(self, *args, **opts):
3015 """Adding leading color part of the colorflow""" 3016 3017 return export_v4.ProcessExporterFortranMatchBox.get_JAMP_lines(self, *args, **opts)
3018
3019 - def get_ME_identifier(self, matrix_element, group_number = None, group_elem_number = None):
3020          """ To avoid mixing notations between Borns and virtuals, we also call it MG5 here. """ 
3021          return 'MG5_%d_'%matrix_element.get('processes')[0].get('id') 
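    # Illustrative note (hypothetical id): a matrix element whose first process
    # carries id 12 would thus get the prefix 'MG5_12_', which is prepended to
    # the names of the generated MadLoop subroutines.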
3022 3023 3024 #=============================================================================== 3025 # LoopInducedExporter 3026 #===============================================================================
3027 -class LoopInducedExporterME(LoopProcessOptimizedExporterFortranSA):
3028 """ A class to specify all the functions common to LoopInducedExporterMEGroup 3029 and LoopInducedExporterMENoGroup (but not relevant for the original 3030 Madevent exporters)""" 3031 3032 madloop_makefile_name = 'makefile_MadLoop' 3033 3034
3035 - def __init__(self, *args, **opts):
3036 """ Initialize the process, setting the proc characteristics.""" 3037 super(LoopInducedExporterME, self).__init__(*args, **opts) 3038 self.proc_characteristic['loop_induced'] = True
3039
3040 - def get_context(self,*args,**opts):
3041 """ Make sure that the contextual variable MadEventOutput is set to 3042 True for this exporter""" 3043 3044 context = super(LoopInducedExporterME,self).get_context(*args,**opts) 3045 context['MadEventOutput'] = True 3046 return context
3047 3048 #=========================================================================== 3049 # write a procdef_mg5 (an equivalent of the MG4 proc_card.dat) 3050 #===========================================================================
3051 - def write_procdef_mg5(self, file_pos, modelname, process_str):
3052          """ Write an equivalent of the MG4 proc_card so that all the MadEvent4 
3053          Perl scripts keep working properly for a pure MG5 run. 
3054          Not needed for StandAlone, so we must call the correct mother here. 
3055          """ 
3056  
3057          return export_v4.ProcessExporterFortranMEGroup.write_procdef_mg5( 
3058                                        self, file_pos, modelname, process_str) 
3059
3060 - def get_source_libraries_list(self):
3061          """ Returns the list of libraries to be compiled when compiling the 
3062          SOURCE directory. It is different for loop-induced processes and 
3063          also depends on the value of the 'output_dependencies' option""" 
3064  
3065          libraries_list = super(LoopInducedExporterME,self).\
3066                                                     get_source_libraries_list() 
3067  
3068          if self.dependencies=='internal': 
3069              libraries_list.append('$(LIBDIR)libcts.$(libext)') 
3070              libraries_list.append('$(LIBDIR)libiregi.$(libext)') 
3071  
3072          return libraries_list 
3073 3080
3081 - def copy_template(self, *args, **opts):
3082 """Pick the right mother functions 3083 """ 3084 # Call specifically the necessary building functions for the mixed 3085 # template setup for both MadEvent and MadLoop standalone 3086 LoopProcessExporterFortranSA.loop_additional_template_setup(self, 3087 copy_Source_makefile=False) 3088 3089 LoopProcessOptimizedExporterFortranSA.\ 3090 loop_optimized_additional_template_setup(self)
3091 3092 3093 #=========================================================================== 3094 # Create jpeg diagrams, html pages,proc_card_mg5.dat and madevent.tar.gz 3095 #===========================================================================
3096 - def finalize(self, matrix_elements, history, mg5options, flaglist):
3097 """Function to finalize v4 directory, for inheritance. 3098 """ 3099 3100 self.proc_characteristic['loop_induced'] = True 3101 3102 # This can be uncommented if one desires to have the MadLoop 3103 # initialization performed at the end of the output phase. 3104 # Alternatively, one can simply execute the command 'initMadLoop' in 3105 # the madevent interactive interface after the output. 3106 # from madgraph.interface.madevent_interface import MadLoopInitializer 3107 # MadLoopInitializer.init_MadLoop(self.dir_path, 3108 # subproc_prefix=self.SubProc_prefix, MG_options=None) 3109 3110 self.write_global_specs(matrix_elements)
3111
3112 - def write_tir_cache_size_include(self, writer):
3113          """Write the file 'tir_cache_size.inc' which sets the size of the TIR 
3114          cache the user wishes to employ, together with its default value. 
3115          This can have an impact on MadLoop speed when using stability checks 
3116          but also impacts in a non-negligible way MadLoop's memory footprint. 
3117          It is therefore important that the user can choose its size.""" 
3118  
3119          # In this case of MadLoop+MadEvent output, we set it to 2 because we 
3120          # gain a further speed increase with a TIR cache of size 2 due to the 
3121          # fact that we call MadLoop once per helicity configuration in this 
3122          # case. 
3123          tir_cache_size = "parameter(TIR_CACHE_SIZE=2)" 
3124          writer.writelines(tir_cache_size) 
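    # Illustrative sketch (not part of MadLoop): a hypothetical variant of the
    # routine above in which the TIR cache size is exposed as an argument, so
    # that e.g. a standalone-oriented exporter could pick a different value
    # without duplicating the method. 'writer' is assumed to be a
    # writers.FortranWriter instance, as in the routine above.
    def write_tir_cache_size_include_sketch(self, writer, cache_size=2):
        """Write 'tir_cache_size.inc' with a user-chosen TIR cache size."""
        # The generated include simply contains, e.g. for the default here:
        #     parameter(TIR_CACHE_SIZE=2)
        writer.writelines("parameter(TIR_CACHE_SIZE=%d)" % cache_size)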
3125
3126 - def write_matrix_element_v4(self, writer, matrix_element, fortran_model, 3127 proc_id = None, config_map = [], subproc_number = None):
3128          """ Write the wrapper to call the ML5 subroutine in the library.""" 
3129  
3130          # Generate the MadEvent routines wrapping the ME 
3131          if not matrix_element.get('processes') or \
3132             not matrix_element.get('diagrams'): 
3133              return 0 
3134  
3135          if not isinstance(writer, writers.FortranWriter): 
3136              raise writers.FortranWriter.FortranWriterError(\
3137                  "writer not FortranWriter") 
3138  
3139          replace_dict = copy.copy(matrix_element.rep_dict) 
3140  
3141          # Extract version number and date from VERSION file 
3142          info_lines = self.get_mg5_info_lines() 
3143          replace_dict['info_lines'] = info_lines 
3144  
3145          # Extract process info lines 
3146          process_lines = self.get_process_info_lines(matrix_element) 
3147          replace_dict['process_lines'] = process_lines 
3148  
3149          # Set proc_id 
3150          # It can be set to None when write_matrix_element_v4 is called without 
3151          # grouping. In this case the subroutine SMATRIX should take an empty 
3152          # suffix. 
3153          if proc_id is None: 
3154              replace_dict['proc_id'] = '' 
3155          else: 
3156              replace_dict['proc_id'] = proc_id 
3157  
3158          # Set the average over the number of initial helicities 
3159          replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 
3160          replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\
3161                                         matrix_element.get_beams_hel_avg_factor() 
3162  
3163          # Extract helicity lines 
3164          helicity_lines = self.get_helicity_lines(matrix_element) 
3165          replace_dict['helicity_lines'] = helicity_lines 
3166  
3167  
3168          # Extract ndiags 
3169          ndiags = len(matrix_element.get('diagrams')) 
3170          replace_dict['ndiags'] = ndiags 
3171  
3172          # Set define_iconfigs_lines 
3173          replace_dict['define_iconfigs_lines'] = \
3174              """INTEGER MAPCONFIG(0:LMAXCONFIGS), ICONFIG 
3175              COMMON/TO_MCONFIGS/MAPCONFIG, ICONFIG""" 
3176  
3177          if proc_id: 
3178              # Set lines for subprocess group version 
3179              # Set define_iconfigs_lines 
3180              replace_dict['define_iconfigs_lines'] += \
3181                  """\nINTEGER SUBDIAG(MAXSPROC),IB(2) 
3182                  COMMON/TO_SUB_DIAG/SUBDIAG,IB""" 
3183              # Set set_amp2_line 
3184              replace_dict['configID_in_matrix'] = "SUBDIAG(%s)"%proc_id 
3185          else: 
3186              # Standard running 
3187              # Set set_amp2_line 
3188              replace_dict['configID_in_matrix'] = "MAPCONFIG(ICONFIG)" 
3189  
3190          # Set the MadLoop prefix identifying this ME (and its group, if any) 
3191          replace_dict['ml_prefix'] = \
3192                 self.get_ME_identifier(matrix_element, subproc_number, proc_id) 
3193  
3194          # Extract ncolor 
3195          ncolor = max(1, len(matrix_element.get('color_basis'))) 
3196          replace_dict['ncolor'] = ncolor 
3197  
3198          n_tot_diags = len(matrix_element.get_loop_diagrams()) 
3199          replace_dict['n_tot_diags'] = n_tot_diags 
3200  
3201          file = open(pjoin(_file_path, \
3202                       'iolibs/template_files/%s' % self.matrix_file)).read() 
3203          file = file % replace_dict 
3204  
3205          # Write the file 
3206          writer.writelines(file) 
3207  
3208          return 0, ncolor 
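    # Illustrative sketch (not part of MadLoop): the wrapper above fills a
    # Fortran template through Python's '%(name)s' substitution. The toy
    # template, dictionary and method name below are hypothetical and only
    # demonstrate the mechanism; the real templates live in
    # iolibs/template_files/.
    def _template_substitution_sketch(self):
        """Return a toy template with all placeholders resolved."""
        toy_template = """      SUBROUTINE SMATRIX%(proc_id)s(P,ANS)
C     %(process_lines)s
      INTEGER NCOLOR
      PARAMETER (NCOLOR=%(ncolor)d)
      END
"""
        toy_replace_dict = {'proc_id': '1',
                            'process_lines': 'Process: g g > h',
                            'ncolor': 2}
        return toy_template % toy_replace_dict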
3209
3210 - def get_amp2_lines(self, *args, **opts):
3211          """Make sure the function is implemented in the daughters""" 
3212  
3213          raise NotImplementedError, 'The function get_amp2_lines must be '+\
3214                  'implemented in the daughters of LoopInducedExporterME' 
3215 3216 #=============================================================================== 3217 # LoopInducedExporterMEGroup 3218 #===============================================================================
3219 -class LoopInducedExporterMEGroup(LoopInducedExporterME, 3220 export_v4.ProcessExporterFortranMEGroup):
3221 """Class to take care of exporting a set of grouped loop induced matrix 3222 elements""" 3223 3224 matrix_file = "matrix_loop_induced_madevent_group.inc" 3225 3231
3232 - def write_source_makefile(self, *args, **opts):
3233 """Pick the correct write_source_makefile function from 3234 ProcessExporterFortranMEGroup""" 3235 3236 export_v4.ProcessExporterFortranMEGroup.write_source_makefile(self, 3237 *args, **opts)
3238
3239 - def copy_template(self, *args, **opts):
3240          """Pick the right mother functions 
3241          """ 
3242          # Call specifically the necessary building functions for the mixed 
3243          # template setup for both MadEvent and MadLoop standalone 
3244  
3245          # Start with the MadEvent one 
3246          export_v4.ProcessExporterFortranMEGroup.copy_template(self,*args,**opts) 
3247  
3248          # Then the MadLoop-standalone related one 
3249          LoopInducedExporterME.copy_template(self, *args, **opts) 
3250
3251 - def finalize(self, *args, **opts):
3252          """Pick the right mother functions 
3253          """ 
3254          # Call specifically which finalize must be used, so that the 
3255          # MRO doesn't interfere. 
3256  
3257          self.proc_characteristic['loop_induced'] = True 
3258  
3259          export_v4.ProcessExporterFortranMEGroup.finalize(self,*args,**opts) 
3260  
3261          # And the finalize from LoopInducedExporterME which essentially takes 
3262          # care of MadLoop virtuals initialization 
3263          LoopInducedExporterME.finalize(self,*args,**opts) 
3264
3265 - def generate_subprocess_directory(self, subproc_group, 3266 fortran_model,group_number):
3267 """Generate the Pn directory for a subprocess group in MadEvent, 3268 including the necessary matrix_N.f files, configs.inc and various 3269 other helper files""" 3270 3271 # Generate the MadLoop files 3272 calls = 0 3273 matrix_elements = subproc_group.get('matrix_elements') 3274 for ime, matrix_element in enumerate(matrix_elements): 3275 self.unique_id +=1 3276 calls += self.generate_loop_subprocess(matrix_element,fortran_model, 3277 group_number = group_number, proc_id = str(ime+1), 3278 # group_number = str(subproc_group.get('number')), proc_id = str(ime+1), 3279 config_map = subproc_group.get('diagram_maps')[ime], 3280 unique_id=self.unique_id) 3281 3282 # Then generate the MadEvent files 3283 export_v4.ProcessExporterFortranMEGroup.generate_subprocess_directory( 3284 self, subproc_group,fortran_model,group_number) 3285 3286 return calls
3287
3288 - def get_amp2_lines(self, matrix_element, replace_dict, config_map):
3289 """Return the various replacement dictionary inputs necessary for the 3290 multichanneling amp2 definition for the loop-induced MadEvent output. 3291 """ 3292 3293 if not config_map: 3294 raise MadGraph5Error, 'A multi-channeling configuration map is '+\ 3295 ' necessary for the MadEvent Loop-induced output with grouping.' 3296 3297 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 3298 3299 ret_lines = [] 3300 # In this case, we need to sum up all amplitudes that have 3301 # identical topologies, as given by the config_map (which 3302 # gives the topology/config for each of the diagrams 3303 if isinstance(matrix_element, loop_helas_objects.LoopHelasMatrixElement): 3304 diagrams = matrix_element.get_loop_diagrams() 3305 else: 3306 diagrams = matrix_element.get('diagrams') 3307 3308 # Note that we need to use AMP2 number corresponding to the first 3309 # diagram number used for that AMP2. 3310 # The dictionary below maps the config ID to this corresponding first 3311 # diagram number 3312 config_index_map = {} 3313 # For each diagram number, the dictionary below gives the config_id it 3314 # belongs to or 0 if it doesn't belong to any. 3315 loop_amp_ID_to_config = {} 3316 3317 # Combine the diagrams with identical topologies 3318 config_to_diag_dict = {} 3319 for idiag, diag in enumerate(diagrams): 3320 try: 3321 config_to_diag_dict[config_map[idiag]].append(idiag) 3322 except KeyError: 3323 config_to_diag_dict[config_map[idiag]] = [idiag] 3324 3325 for config in sorted(config_to_diag_dict.keys()): 3326 config_index_map[config] = (config_to_diag_dict[config][0] + 1) 3327 3328 # First add the UV and R2 counterterm amplitudes of each selected 3329 # diagram for the multichannel config 3330 CT_amp_numbers = [a.get('number') for a in \ 3331 sum([diagrams[idiag].get_ct_amplitudes() for \ 3332 idiag in config_to_diag_dict[config]], [])] 3333 3334 for CT_amp_number in CT_amp_numbers: 3335 loop_amp_ID_to_config[CT_amp_number] = config 3336 3337 # Now add here the loop amplitudes. 3338 loop_amp_numbers = [a.get('amplitudes')[0].get('number') 3339 for a in sum([diagrams[idiag].get_loop_amplitudes() for \ 3340 idiag in config_to_diag_dict[config]], [])] 3341 3342 for loop_amp_number in loop_amp_numbers: 3343 loop_amp_ID_to_config[loop_amp_number] = config 3344 3345 # Notice that the config_id's are not necessarily sequential here, so 3346 # the size of the config_index_map array has to be the maximum over all 3347 # config_ids. 3348 # config_index_map should never be empty unless there was no diagram, 3349 # so the expression below is ok. 3350 n_configs = max(config_index_map.keys()) 3351 replace_dict['nmultichannel_configs'] = n_configs 3352 3353 # We must fill the empty entries of the map with the dummy amplitude 3354 # number 0. 3355 conf_list = [(config_index_map[i] if i in config_index_map else 0) \ 3356 for i in range(1,n_configs+1)] 3357 # Now the placeholder 'nmultichannels' refers to the number of 3358 # multi-channels which are contributing, so we must filter out zeros. 
3359 replace_dict['nmultichannels'] = len([_ for _ in conf_list if _!=0]) 3360 3361 # Now write the amp2 related inputs in the replacement dictionary 3362 res_list = [] 3363 chunk_size = 6 3364 for k in xrange(0, len(conf_list), chunk_size): 3365 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \ 3366 (k + 1, min(k + chunk_size, len(conf_list)), 3367 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]]))) 3368 3369 replace_dict['config_index_map_definition'] = '\n'.join(res_list) 3370 3371 res_list = [] 3372 n_loop_amps = max(loop_amp_ID_to_config.keys()) 3373 amp_list = [loop_amp_ID_to_config[i] for i in \ 3374 sorted(loop_amp_ID_to_config.keys()) if i!=0] 3375 chunk_size = 6 3376 for k in xrange(0, len(amp_list), chunk_size): 3377 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \ 3378 (k + 1, min(k + chunk_size, len(amp_list)), 3379 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]]))) 3380 3381 replace_dict['config_map_definition'] = '\n'.join(res_list) 3382 3383 return
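# ------------------------------------------------------------------------------
# Illustrative sketch (not part of MadLoop): a toy example of the chunking of a
# configuration list into Fortran DATA statements performed above. The list
# and function name below are hypothetical; with a chunk size of 6 the eight
# entries yield two DATA lines.
def _data_chunking_sketch():
    """Return toy DATA lines; purely illustrative."""
    toy_conf_list = [1, 3, 0, 4, 6, 7, 9, 0]
    toy_lines = []
    for k in xrange(0, len(toy_conf_list), 6):
        toy_lines.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \
            (k + 1, min(k + 6, len(toy_conf_list)),
             ','.join(["%6r" % i for i in toy_conf_list[k:k + 6]])))
    return toy_lines
# _data_chunking_sketch() returns:
#   DATA (config_index_map(i),i=     1,     6) /     1,     3,     0,     4,     6,     7/
#   DATA (config_index_map(i),i=     7,     8) /     9,     0/
# ------------------------------------------------------------------------------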
3384 3385 #=============================================================================== 3386 # LoopInducedExporterMENoGroup 3387 #===============================================================================
3388 -class LoopInducedExporterMENoGroup(LoopInducedExporterME, 3389 export_v4.ProcessExporterFortranME):
3390 """Class to take care of exporting a set of individual loop induced matrix 3391 elements""" 3392 3393 matrix_file = "matrix_loop_induced_madevent.inc" 3394 3400
3401 - def write_source_makefile(self, *args, **opts):
3402 """Pick the correct write_source_makefile function from 3403 ProcessExporterFortran""" 3404 3405 super(export_v4.ProcessExporterFortranME,self).\ 3406 write_source_makefile(*args, **opts)
3407
3408 - def copy_template(self, *args, **opts):
3409          """Pick the right mother functions 
3410          """ 
3411          # Call specifically the necessary building functions for the mixed 
3412          # template setup for both MadEvent and MadLoop standalone 
3413  
3414          # Start with the MadEvent one 
3415          export_v4.ProcessExporterFortranME.copy_template(self,*args,**opts) 
3416  
3417          # Then the MadLoop-standalone related one 
3418          LoopInducedExporterME.copy_template(self, *args, **opts) 
3419
3420 - def finalize(self, *args, **opts):
3421          """Pick the right mother functions 
3422          """ 
3423  
3424          self.proc_characteristic['loop_induced'] = True 
3425          # Call specifically which finalize must be used, so that the 
3426          # MRO doesn't interfere. 
3427          export_v4.ProcessExporterFortranME.finalize(self, *args, **opts) 
3428  
3429          # And the finalize from LoopInducedExporterME which essentially takes 
3430          # care of MadLoop virtuals initialization 
3431          LoopInducedExporterME.finalize(self, *args, **opts) 
3432
3433 - def generate_subprocess_directory(self, matrix_element, fortran_model, me_number):
3434          """Generate the Pn directory for a subprocess in MadEvent, 
3435          including the necessary matrix_N.f files, configs.inc and various 
3436          other helper files""" 
3437  
3438          self.unique_id += 1 
3439          # First generate the MadLoop files 
3440          calls = self.generate_loop_subprocess(matrix_element,fortran_model, 
3441                         group_number = me_number, 
3442                         unique_id=self.unique_id) 
3443  
3444  
3445          # Then generate the MadEvent files 
3446          calls += export_v4.ProcessExporterFortranME.generate_subprocess_directory( 
3447                                self, matrix_element, fortran_model, me_number) 
3448          return calls 
3449
3450 - def get_amp2_lines(self, matrix_element, replace_dict, config_map):
3451 """Return the amp2(i) = sum(amp for diag(i))^2 lines""" 3452 3453 if config_map: 3454 raise MadGraph5Error, 'A configuration map should not be specified'+\ 3455 ' for the Loop induced exporter without grouping.' 3456 3457 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 3458 # Get minimum legs in a vertex 3459 vert_list = [max(diag.get_vertex_leg_numbers()) for diag in \ 3460 matrix_element.get('diagrams') if diag.get_vertex_leg_numbers()!=[]] 3461 minvert = min(vert_list) if vert_list!=[] else 0 3462 3463 # Note that we need to use AMP2 number corresponding to the first 3464 # diagram number used for that AMP2. 3465 # The dictionary below maps the config ID to this corresponding first 3466 # diagram number 3467 config_index_map = {} 3468 # For each diagram number, the dictionary below gives the config_id it 3469 # belongs to or 0 if it doesn't belong to any. 3470 loop_amp_ID_to_config = {} 3471 3472 n_configs = 0 3473 for idiag, diag in enumerate(matrix_element.get('diagrams')): 3474 # Ignore any diagrams with 4-particle vertices. 3475 use_for_multichanneling = True 3476 if diag.get_vertex_leg_numbers()!=[] and max(diag.get_vertex_leg_numbers()) > minvert: 3477 use_for_multichanneling = False 3478 curr_config = 0 3479 else: 3480 n_configs += 1 3481 curr_config = n_configs 3482 3483 if not use_for_multichanneling: 3484 if 0 not in config_index_map: 3485 config_index_map[0] = idiag + 1 3486 else: 3487 config_index_map[curr_config] = idiag + 1 3488 3489 CT_amps = [ a.get('number') for a in diag.get_ct_amplitudes()] 3490 for CT_amp in CT_amps: 3491 loop_amp_ID_to_config[CT_amp] = curr_config 3492 3493 Loop_amps = [a.get('amplitudes')[0].get('number') 3494 for a in diag.get_loop_amplitudes()] 3495 for Loop_amp in Loop_amps: 3496 loop_amp_ID_to_config[Loop_amp] = curr_config 3497 3498 # Now write the amp2 related inputs in the replacement dictionary 3499 n_configs = len([k for k in config_index_map.keys() if k!=0]) 3500 replace_dict['nmultichannel_configs'] = n_configs 3501 # Now the placeholder 'nmultichannels' refers to the number of 3502 # multi-channels which are contributing which, in the non-grouped case 3503 # is always equal to the total number of multi-channels. 3504 replace_dict['nmultichannels'] = n_configs 3505 3506 res_list = [] 3507 conf_list = [config_index_map[i] for i in sorted(config_index_map.keys()) 3508 if i!=0] 3509 chunk_size = 6 3510 for k in xrange(0, len(conf_list), chunk_size): 3511 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \ 3512 (k + 1, min(k + chunk_size, len(conf_list)), 3513 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]]))) 3514 3515 replace_dict['config_index_map_definition'] = '\n'.join(res_list) 3516 3517 res_list = [] 3518 n_loop_amps = max(loop_amp_ID_to_config.keys()) 3519 amp_list = [loop_amp_ID_to_config[i] for i in \ 3520 sorted(loop_amp_ID_to_config.keys()) if i!=0] 3521 chunk_size = 6 3522 for k in xrange(0, len(amp_list), chunk_size): 3523 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \ 3524 (k + 1, min(k + chunk_size, len(amp_list)), 3525 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]]))) 3526 3527 replace_dict['config_map_definition'] = '\n'.join(res_list)
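# ------------------------------------------------------------------------------
# Illustrative sketch (not part of MadLoop): the multichannel filter above keeps
# only diagrams whose largest vertex has 'minvert' legs. With the hypothetical
# per-diagram maxima below, minvert is 3, so the 4-point diagram is assigned
# the dummy config 0 and dropped from multichanneling.
def _multichannel_filter_sketch():
    """Return toy per-diagram config assignments; purely illustrative."""
    toy_max_vertex_legs = [3, 4, 3]
    toy_minvert = min(toy_max_vertex_legs)
    toy_configs = []
    n_configs = 0
    for legs in toy_max_vertex_legs:
        if legs > toy_minvert:
            toy_configs.append(0)       # not used for multichanneling
        else:
            n_configs += 1
            toy_configs.append(n_configs)
    return toy_configs                  # -> [1, 0, 2]
# ------------------------------------------------------------------------------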
3528