1
2
3
4
5
6
7
8
9
10
11
12
13
14
15 """Methods and classes to export matrix elements to fks format."""
16
17 from distutils import dir_util
18 import glob
19 import logging
20 import os
21 import re
22 import shutil
23 import subprocess
24 import string
25 import copy
26 import platform
27
28 import madgraph.core.color_algebra as color
29 import madgraph.core.helas_objects as helas_objects
30 import madgraph.core.base_objects as base_objects
31 import madgraph.fks.fks_helas_objects as fks_helas_objects
32 import madgraph.fks.fks_base as fks
33 import madgraph.fks.fks_common as fks_common
34 import madgraph.iolibs.drawing_eps as draw
35 import madgraph.iolibs.gen_infohtml as gen_infohtml
36 import madgraph.iolibs.files as files
37 import madgraph.various.misc as misc
38 import madgraph.iolibs.file_writers as writers
39 import madgraph.iolibs.template_files as template_files
40 import madgraph.iolibs.ufo_expression_parsers as parsers
41 import madgraph.iolibs.export_v4 as export_v4
42 import madgraph.loop.loop_exporters as loop_exporters
43 import madgraph.various.q_polynomial as q_polynomial
44 import madgraph.various.banner as banner_mod
45
46 import aloha.create_aloha as create_aloha
47
48 import models.write_param_card as write_param_card
49 import models.check_param_card as check_param_card
50 from madgraph import MadGraph5Error, MG5DIR, InvalidCmd
51 from madgraph.iolibs.files import cp, ln, mv
52
53 pjoin = os.path.join
54
55 _file_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] + '/'
56 logger = logging.getLogger('madgraph.export_fks')
57
58
    # NOTE(review): the enclosing "def" header line is missing from this chunk
    # of the listing -- confirm the signature upstream. The body unpacks a
    # single `args` tuple, so presumably this is a worker suitable for
    # multiprocessing-style dispatch that renders the jpeg diagrams of one
    # P* subprocess directory.
    Pdir = args[0]      # P* subprocess directory to enter
    old_pos = args[1]   # base directory the output path is relative to
    dir_path = args[2]  # process output directory (holds bin/internal)

    # Silence the helper script's stdout.
    devnull = os.open(os.devnull, os.O_RDWR)

    os.chdir(Pdir)
    subprocess.call([os.path.join(old_pos, dir_path, 'bin', 'internal', 'gen_jpeg-pl')],
                    stdout = devnull)
    # Return to the parent directory once the diagrams are produced.
    os.chdir(os.path.pardir)
70
71
72
73
74
76 """Class to take care of exporting a set of matrix elements to
77 Fortran (v4) format."""
78
79
80
81
        # NOTE(review): the enclosing "def" header line is missing from this
        # chunk of the listing -- confirm the signature upstream.
        """create the directory run_name as a copy of the MadEvent
        Template, and clean the directory
        For now it is just the same as copy_v4template, but it will be modified
        """

        mgme_dir = self.mgme_dir
        dir_path = self.dir_path
        clean =self.opt['clean']

        # First-time export: copy Template/NLO, overlay Template/Common and
        # save a default copy of the plot card.
        if not os.path.isdir(dir_path):
            if not mgme_dir:
                raise MadGraph5Error, \
                      "No valid MG_ME path given for MG4 run directory creation."
            logger.info('initialize a new directory: %s' % \
                        os.path.basename(dir_path))
            shutil.copytree(os.path.join(mgme_dir, 'Template', 'NLO'), dir_path, True)
            # distutils copy_tree merges Common on top of the NLO template.
            dir_util.copy_tree(pjoin(self.mgme_dir, 'Template', 'Common'),dir_path)
            for card in ['plot_card']:
                if os.path.isfile(pjoin(self.dir_path, 'Cards',card + '.dat')):
                    try:
                        shutil.copy(pjoin(self.dir_path, 'Cards', card + '.dat'),
                                    pjoin(self.dir_path, 'Cards', card + '_default.dat'))
                    except IOError:
                        logger.warning("Failed to move " + card + ".dat to default")
        # Existing directory without a TemplateVersion.txt: record the version.
        elif not os.path.isfile(os.path.join(dir_path, 'TemplateVersion.txt')):
            if not mgme_dir:
                raise MadGraph5Error, \
                      "No valid MG_ME path given for MG4 run directory creation."
            try:
                shutil.copy(os.path.join(mgme_dir, 'MGMEVersion.txt'), dir_path)
            except IOError:
                # Fall back to the MG5 package version.
                MG5_version = misc.get_pkg_info()
                open(os.path.join(dir_path, 'MGMEVersion.txt'), 'w').write( \
                    "5." + MG5_version['version'])

        # Optionally clean leftovers from a previous export.
        if clean:
            logger.info('remove old information in %s' % os.path.basename(dir_path))
            if os.environ.has_key('MADGRAPH_BASE'):
                subprocess.call([os.path.join('bin', 'internal', 'clean_template'),
                                 '--web'],cwd=dir_path)
            else:
                try:
                    subprocess.call([os.path.join('bin', 'internal', 'clean_template')], \
                                    cwd=dir_path)
                except Exception, why:
                    raise MadGraph5Error('Failed to clean correctly %s: \n %s' \
                                         % (os.path.basename(dir_path),why))

        # Stamp the SubProcesses directory with the MG version.
        MG_version = misc.get_pkg_info()
        open(os.path.join(dir_path, 'SubProcesses', 'MGVersion.txt'), 'w').write(
            MG_version['version'])

        # Make the CutTools library available to the output.
        self.link_CutTools(dir_path)

        # Regenerate makefile_loop and make_opts from their .inc templates
        # (no TIR libraries linked here, hence the empty lists).
        link_tir_libs=[]
        tir_libs=[]
        os.remove(os.path.join(self.dir_path,'SubProcesses','makefile_loop.inc'))
        dirpath = os.path.join(self.dir_path, 'SubProcesses')
        filename = pjoin(self.dir_path, 'SubProcesses','makefile_loop')
        calls = self.write_makefile_TIR(writers.MakefileWriter(filename),
                                        link_tir_libs,tir_libs)
        os.remove(os.path.join(self.dir_path,'Source','make_opts.inc'))
        filename = pjoin(self.dir_path, 'Source','make_opts')
        calls = self.write_make_opts(writers.MakefileWriter(filename),
                                     link_tir_libs,tir_libs)

        # Save default copies of the analysis/shower cards.
        for card in ['FO_analyse_card', 'shower_card']:
            try:
                shutil.copy(pjoin(self.dir_path, 'Cards',
                                  card + '.dat'),
                            pjoin(self.dir_path, 'Cards',
                                  card + '_default.dat'))
            except IOError:
                logger.warning("Failed to copy " + card + ".dat to default")

        cwd = os.getcwd()
        dirpath = os.path.join(self.dir_path, 'SubProcesses')
        try:
            os.chdir(dirpath)
        except os.error:
            logger.error('Could not cd to directory %s' % dirpath)
            return 0

        # Bring the MadLoop parameter machinery over from the StandAlone tree.
        cpfiles= ["SubProcesses/MadLoopParamReader.f",
                  "Cards/MadLoopParams.dat",
                  "SubProcesses/MadLoopParams.inc"]

        for file in cpfiles:
            shutil.copy(os.path.join(self.loop_dir,'StandAlone/', file),
                        os.path.join(self.dir_path, file))

        shutil.copy(pjoin(self.dir_path, 'Cards','MadLoopParams.dat'),
                    pjoin(self.dir_path, 'Cards','MadLoopParams_default.dat'))

        if os.path.exists(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat')):
            self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.dir_path,
                                                  'Cards', 'MadLoopParams.dat'))
            # Write out the MadLoop option file picked up at run time.
            self.MadLoopparam.write(pjoin(self.dir_path,"SubProcesses",
                                          "MadLoopParams.dat"))

        # Generate MadLoopCommons.f from its template; the banner printing
        # commands are substituted in and COLLIER is flagged unavailable here.
        MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone',
                            "SubProcesses","MadLoopCommons.inc")).read()
        writer = writers.FortranWriter(os.path.join(self.dir_path,
                                       "SubProcesses","MadLoopCommons.f"))
        writer.writelines(MadLoopCommon%{
            'print_banner_commands':self.MadLoop_banner},
            context={'collier_available':False})
        writer.close()

        # CutTools multiple-precision headers (written in the current dir,
        # i.e. SubProcesses, because of the chdir above).
        self.write_mp_files(writers.FortranWriter('cts_mprec.h'),\
                            writers.FortranWriter('cts_mpc.h'))

        # Force NHelForMCoverHels to -1 in the FKS parameter card.
        FKS_card_path = pjoin(self.dir_path,'Cards','FKS_params.dat')
        FKS_card_file = open(FKS_card_path,'r')
        FKS_card = FKS_card_file.read()
        FKS_card_file.close()
        FKS_card = re.sub(r"#NHelForMCoverHels\n-?\d+",
                          "#NHelForMCoverHels\n-1", FKS_card)
        FKS_card_file = open(FKS_card_path,'w')
        FKS_card_file.write(FKS_card)
        FKS_card_file.close()

        # Restore the original working directory.
        os.chdir(cwd)

        # Ship the python helpers needed by bin/internal.
        self.copy_python_files()

        # Write the PDF open-data helpers.
        self.write_pdf_opendata()
229
        # NOTE(review): the enclosing "def" header line is missing from this
        # chunk of the listing -- confirm the signature upstream (expects
        # writer, link_tir_libs, tir_libs, tir_include at least).
        """ Create the file makefile_loop which links to the TIR libraries."""

        # Read the makefile template and fill in the TIR link information.
        file = open(os.path.join(self.mgme_dir,'Template','NLO',
                                 'SubProcesses','makefile_loop.inc')).read()
        replace_dict={}
        replace_dict['link_tir_libs']=' '.join(link_tir_libs)
        replace_dict['tir_libs']=' '.join(tir_libs)
        # Literal make pattern rules that must survive the %-substitution.
        replace_dict['dotf']='%.f'
        replace_dict['doto']='%.o'
        replace_dict['tir_include']=' '.join(tir_include)
        file=file%replace_dict
        # Write the result if a writer is given, otherwise return the text.
        if writer:
            writer.writelines(file)
        else:
            return file
246
247
        # NOTE(review): the enclosing "def" header line is missing from this
        # chunk of the listing -- confirm the signature upstream (expects
        # writer, link_tir_libs, tir_libs at least).
        """ Create the file make_opts which links to the TIR libraries."""
        file = open(os.path.join(self.mgme_dir,'Template','NLO',
                                 'Source','make_opts.inc')).read()
        replace_dict={}
        replace_dict['link_tir_libs']=' '.join(link_tir_libs)
        replace_dict['tir_libs']=' '.join(tir_libs)
        # Literal make pattern rules that must survive the %-substitution.
        replace_dict['dotf']='%.f'
        replace_dict['doto']='%.o'
        file=file%replace_dict
        # Write the result if a writer is given, otherwise return the text.
        if writer:
            writer.writelines(file)
        else:
            return file
262
263
264
265
        # NOTE(review): the enclosing "def" header line is missing from this
        # chunk of the listing -- confirm the signature upstream.
        """copy python files required for the Template"""

        # Paths are relative to the madgraph package root (_file_path).
        files_to_copy = [ \
            pjoin('interface','amcatnlo_run_interface.py'),
            pjoin('interface','extended_cmd.py'),
            pjoin('interface','common_run_interface.py'),
            pjoin('interface','coloring_logging.py'),
            pjoin('various','misc.py'),
            pjoin('various','shower_card.py'),
            pjoin('various','FO_analyse_card.py'),
            pjoin('various','histograms.py'),
            pjoin('various','banner.py'),
            pjoin('various','cluster.py'),
            pjoin('various','systematics.py'),
            pjoin('various','lhe_parser.py'),
            pjoin('madevent','sum_html.py'),
            pjoin('madevent','gen_crossxhtml.py'),
            pjoin('iolibs','files.py'),
            pjoin('iolibs','save_load_object.py'),
            pjoin('iolibs','file_writers.py'),
            pjoin('..','models','check_param_card.py'),
            pjoin('__init__.py')
            ]
        # The logging configuration is renamed on the way in.
        cp(_file_path+'/interface/.mg5_logging.conf',
           self.dir_path+'/bin/internal/me5_logging.conf')

        # Flatten everything into bin/internal (basenames only).
        for cp_file in files_to_copy:
            cp(pjoin(_file_path,cp_file),
               pjoin(self.dir_path,'bin','internal',os.path.basename(cp_file)))
296
297 - def convert_model(self, model, wanted_lorentz = [],
298 wanted_couplings = []):
299
300 super(ProcessExporterFortranFKS,self).convert_model(model,
301 wanted_lorentz, wanted_couplings)
302
303 IGNORE_PATTERNS = ('*.pyc','*.dat','*.py~')
304 try:
305 shutil.rmtree(pjoin(self.dir_path,'bin','internal','ufomodel'))
306 except OSError as error:
307 pass
308 model_path = model.get('modelpath')
309 shutil.copytree(model_path,
310 pjoin(self.dir_path,'bin','internal','ufomodel'),
311 ignore=shutil.ignore_patterns(*IGNORE_PATTERNS))
312 if hasattr(model, 'restrict_card'):
313 out_path = pjoin(self.dir_path, 'bin', 'internal','ufomodel',
314 'restrict_default.dat')
315 if isinstance(model.restrict_card, check_param_card.ParamCard):
316 model.restrict_card.write(out_path)
317 else:
318 files.cp(model.restrict_card, out_path)
319
320
321
322
323
324
325 - def write_maxparticles_file(self, writer, maxparticles):
326 """Write the maxparticles.inc file for MadEvent"""
327
328 lines = "integer max_particles, max_branch\n"
329 lines += "parameter (max_particles=%d) \n" % maxparticles
330 lines += "parameter (max_branch=max_particles-1)"
331
332
333 writer.writelines(lines)
334
335 return True
336
337
338
339
340
        # NOTE(review): the enclosing "def" header line is missing from this
        # chunk of the listing -- confirm the signature upstream (expects
        # writer and maxconfigs at least).
        """Write the maxconfigs.inc file for MadEvent"""

        lines = "integer lmaxconfigs\n"
        lines += "parameter (lmaxconfigs=%d)" % maxconfigs

        writer.writelines(lines)

        # Always reports success.
        return True
351
352
353
354
355
        # NOTE(review): the enclosing "def" header line is missing from this
        # chunk of the listing -- confirm the signature upstream (expects
        # file_pos, modelname, process_str at least).
        """ write an equivalent of the MG4 proc_card in order that all the Madevent
        Perl script of MadEvent4 are still working properly for pure MG5 run."""

        proc_card_template = template_files.mg4_proc_card.mg4_template
        process_template = template_files.mg4_proc_card.process_template
        process_text = ''
        coupling = ''
        new_process_content = []

        # Normalize spacing around '=' and commas so the whitespace split
        # below reliably isolates coupling constraints from process tokens.
        process_str = process_str.replace(' =', '=')
        process_str = process_str.replace('= ', '=')
        process_str = process_str.replace(',',' , ')

        # Tokens containing '=' are coupling-order constraints; everything
        # else belongs to the process definition itself.
        for info in process_str.split():
            if '=' in info:
                coupling += info + '\n'
            else:
                new_process_content.append(info)

        process_str = ' '.join(new_process_content)

        process_text += process_template.substitute({'process': process_str, \
                                                     'coupling': coupling})

        text = proc_card_template.substitute({'process': process_text,
                                              'model': modelname,
                                              'multiparticle':''})
        ff = open(file_pos, 'w')
        ff.write(text)
        ff.close()
391
392
393
394
395
        # NOTE(review): the enclosing "def" header line is missing from this
        # chunk of the listing -- confirm the signature upstream (expects
        # file_pos and initial_states at least).
        """ Write an initial state process map. Each possible PDF
        combination gets an unique identifier."""

        text=''
        for i,e in enumerate(initial_states):
            # Pad single-entry combinations with a 0 placeholder.
            # NOTE(review): this mutates the caller's list in place.
            if len(e) ==1:
                e.append(0)
            text=text+str(i+1)+' '+str(len(e))
            for t in e:
                text=text+' '
                # EAFP: t may be an iterable of PDG ids or a single value.
                try:
                    for p in t:
                        text=text+' '+str(p)
                except TypeError:
                    text=text+' '+str(t)
            text=text+'\n'

        ff = open(file_pos, 'w')
        ff.write(text)
        ff.close()
417
        # NOTE(review): the enclosing "def" header line is missing from this
        # chunk of the listing -- confirm the signature upstream.
        """ A function returning a string uniquely identifying the matrix
        element given in argument so that it can be used as a prefix to all
        MadLoop5 subroutines and common blocks related to it. This allows
        to compile several processes into one library as requested by the
        BLHA (Binoth LesHouches Accord) guidelines. The MadFKS design
        necessitates that there is no process prefix."""

        # MadFKS compiles one process per library, so no prefix is needed.
        return ''
427
428
429
430
432 """writes the coef_specs.inc in the DHELAS folder. Should not be called in the
433 non-optimized mode"""
434 raise fks_common.FKSProcessError(), \
435 "write_coef_specs should be called only in the loop-optimized mode"
436
437
438
439
440
    def generate_directories_fks(self, matrix_element, fortran_model, me_number,
                                 me_ntot, path=os.getcwd(),OLP='MadLoop'):
        """Generate the Pxxxxx_i directories for a subprocess in MadFKS,
        including the necessary matrix.f and various helper files"""
        # NOTE(review): the `path=os.getcwd()` default is evaluated once at
        # import time, not per call -- callers should pass path explicitly.
        proc = matrix_element.born_me['processes'][0]

        if not self.model:
            self.model = matrix_element.get('processes')[0].get('model')

        # All files below are written relative to the target path; remember
        # where we started so we can return at the end.
        cwd = os.getcwd()
        try:
            os.chdir(path)
        except OSError, error:
            error_msg = "The directory %s should exist in order to be able " % path + \
                        "to \"export\" in it. If you see this error message by " + \
                        "typing the command \"export\" please consider to use " + \
                        "instead the command \"output\". "
            raise MadGraph5Error, error_msg

        calls = 0

        self.fksdirs = []

        # One P* directory per born matrix element.
        borndir = "P%s" % \
            (matrix_element.born_me.get('processes')[0].shell_string())
        os.mkdir(borndir)
        os.chdir(borndir)
        logger.info('Writing files in %s (%d / %d)' % (borndir, me_number + 1, me_ntot))

        # Born matrix element and related files.
        self.generate_born_fks_files(matrix_element,
                                     fortran_model, me_number, path)

        # NJET needs an OLE order file to negotiate the loop contract.
        if OLP=='NJET':
            filename = 'OLE_order.lh'
            self.write_lh_order(filename, [matrix_element.born_me.get('processes')[0]], OLP)

        # Virtual (loop) matrix element, when available.
        if matrix_element.virt_matrix_element:
            calls += self.generate_virt_directory( \
                matrix_element.virt_matrix_element, \
                fortran_model, \
                os.path.join(path, borndir))

        # Real-emission matrix elements; the returned list of squared-order
        # combinations is reused below for the real wrappers.
        sqsorders_list = \
            self.write_real_matrix_elements(matrix_element, fortran_model)

        filename = 'extra_cnt_wrapper.f'
        self.write_extra_cnt_wrapper(writers.FortranWriter(filename),
                                     matrix_element.extra_cnt_me_list,
                                     fortran_model)

        # One born_cnt_i.f per extra counterterm matrix element.
        for i, extra_cnt_me in enumerate(matrix_element.extra_cnt_me_list):
            replace_dict = {}

            den_factor_lines = self.get_den_factor_lines(matrix_element,
                                                         extra_cnt_me)
            replace_dict['den_factor_lines'] = '\n'.join(den_factor_lines)

            ij_lines = self.get_ij_lines(matrix_element)
            replace_dict['ij_lines'] = '\n'.join(ij_lines)

            filename = 'born_cnt_%d.f' % (i+1)
            self.write_split_me_fks(writers.FortranWriter(filename),
                                    extra_cnt_me,
                                    fortran_model, 'cnt', '%d' % (i+1),
                                    replace_dict)

        self.write_pdf_calls(matrix_element, fortran_model)

        filename = 'nFKSconfigs.inc'
        self.write_nfksconfigs_file(writers.FortranWriter(filename),
                                    matrix_element,
                                    fortran_model)

        filename = 'iproc.dat'
        self.write_iproc_file(writers.FortranWriter(filename),
                              me_number)

        filename = 'fks_info.inc'
        # Record the splitting types of this ME in the global process
        # characteristics while writing the FKS info include file.
        self.proc_characteristic['splitting_types'] = list(\
            set(self.proc_characteristic['splitting_types']).union(\
                self.write_fks_info_file(writers.FortranWriter(filename),
                                         matrix_element,
                                         fortran_model)))

        filename = 'leshouche_info.dat'
        nfksconfs,maxproc,maxflow,nexternal=\
            self.write_leshouche_info_file(filename,matrix_element)

        # LO-only run (no FKS configurations): fall back to the born ME to
        # size the leshouche declarations.
        if nfksconfs == maxproc == maxflow == 0:
            nfksconfs = 1
            (dummylines, maxproc, maxflow) = self.get_leshouche_lines(
                matrix_element.born_me, 1)

        filename = 'leshouche_decl.inc'
        self.write_leshouche_info_declarations(
            writers.FortranWriter(filename),
            nfksconfs,maxproc,maxflow,nexternal,
            fortran_model)
        filename = 'genps.inc'
        ngraphs = matrix_element.born_me.get_number_of_amplitudes()
        ncolor = max(1,len(matrix_element.born_me.get('color_basis')))
        self.write_genps(writers.FortranWriter(filename),maxproc,ngraphs,\
                         ncolor,maxflow,fortran_model)

        filename = 'configs_and_props_info.dat'
        nconfigs,max_leg_number=self.write_configs_and_props_info_file(
            filename,
            matrix_element)

        filename = 'configs_and_props_decl.inc'
        self.write_configs_and_props_info_declarations(
            writers.FortranWriter(filename),
            nconfigs,max_leg_number,nfksconfs,
            fortran_model)

        # Placeholder include; the real mapping is written elsewhere.
        filename = 'real_from_born_configs.inc'
        self.write_real_from_born_configs_dummy(
            writers.FortranWriter(filename),
            matrix_element,
            fortran_model)

        filename = 'ngraphs.inc'
        self.write_ngraphs_file(writers.FortranWriter(filename),
                                nconfigs)

        # Dispatchers selecting the right real-emission ME / parton
        # luminosity for each FKS configuration.
        filename_me = 'real_me_chooser.f'
        filename_lum = 'parton_lum_chooser.f'
        self.write_real_wrappers(writers.FortranWriter(filename_me),
                                 writers.FortranWriter(filename_lum),
                                 matrix_element, sqsorders_list,
                                 fortran_model)

        filename = 'get_color.f'
        self.write_colors_file(writers.FortranWriter(filename),
                               matrix_element)

        filename = 'nexternal.inc'
        (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
        self.write_nexternal_file(writers.FortranWriter(filename),
                                  nexternal, ninitial)

        filename = 'orders.inc'
        amp_split_orders = self.write_orders_file(
            writers.FortranWriter(filename),
            matrix_element)

        filename = 'amp_split_orders.inc'
        self.write_amp_split_orders_file(
            writers.FortranWriter(filename),
            amp_split_orders)
        self.proc_characteristic['ninitial'] = ninitial
        self.proc_characteristic['nexternal'] = max(self.proc_characteristic['nexternal'], nexternal)

        filename = 'pmass.inc'
        # Use the first real process when it exists, otherwise the born ME
        # (IndexError means there are no real processes).
        try:
            self.write_pmass_file(writers.FortranWriter(filename),
                                  matrix_element.real_processes[0].matrix_element)
        except IndexError:
            self.write_pmass_file(writers.FortranWriter(filename),
                                  matrix_element.born_me)

        # Draw the diagrams for all (born, real, ...) matrix elements.
        self.draw_feynman_diagrams(matrix_element)

        # Shared sources/includes living one level up, symlinked into the
        # P* directory.
        linkfiles = ['BinothLHADummy.f',
                     'check_poles.f',
                     'MCmasses_HERWIG6.inc',
                     'MCmasses_HERWIGPP.inc',
                     'MCmasses_PYTHIA6Q.inc',
                     'MCmasses_PYTHIA6PT.inc',
                     'MCmasses_PYTHIA8.inc',
                     'add_write_info.f',
                     'coupl.inc',
                     'cuts.f',
                     'FKS_params.dat',
                     'initial_states_map.dat',
                     'OLE_order.olc',
                     'FKSParams.inc',
                     'FKSParamReader.f',
                     'cuts.inc',
                     'unlops.inc',
                     'pythia_unlops.f',
                     'driver_mintMC.f',
                     'driver_mintFO.f',
                     'appl_interface.cc',
                     'appl_interface_dummy.f',
                     'appl_common.inc',
                     'reweight_appl.inc',
                     'fastjetfortran_madfks_core.cc',
                     'fastjetfortran_madfks_full.cc',
                     'fjcore.cc',
                     'fastjet_wrapper.f',
                     'fjcore.hh',
                     'fks_Sij.f',
                     'fks_powers.inc',
                     'fks_singular.f',
                     'splitorders_stuff.f',
                     'chooser_functions.f',
                     'veto_xsec.f',
                     'veto_xsec.inc',
                     'weight_lines.f',
                     'genps_fks.f',
                     'boostwdir2.f',
                     'madfks_mcatnlo.inc',
                     'open_output_files.f',
                     'open_output_files_dummy.f',
                     'HwU_dummy.f',
                     'madfks_plot.f',
                     'analysis_dummy.f',
                     'analysis_lhe.f',
                     'mint-integrator2.f',
                     'MC_integer.f',
                     'mint.inc',
                     'montecarlocounter.f',
                     'q_es.inc',
                     'recluster.cc',
                     'Boosts.h',
                     'reweight_xsec.f',
                     'reweight_xsec_events.f',
                     'reweight_xsec_events_pdf_dummy.f',
                     'iproc_map.f',
                     'run.inc',
                     'run_card.inc',
                     'setcuts.f',
                     'setscales.f',
                     'test_soft_col_limits.f',
                     'symmetry_fks_v3.f',
                     'vegas2.for',
                     'write_ajob.f',
                     'handling_lhe_events.f',
                     'write_event.f',
                     'fill_MC_mshell.f',
                     'maxparticles.inc',
                     'message.inc',
                     'initcluster.f',
                     'cluster.inc',
                     'cluster.f',
                     'reweight.f',
                     'randinit',
                     'sudakov.inc',
                     'maxconfigs.inc',
                     'timing_variables.inc']

        for file in linkfiles:
            ln('../' + file , '.')
        os.system("ln -s ../../Cards/param_card.dat .")

        # Pick the BinothLHA implementation matching the loop provider.
        os.system("ln -s ../makefile_fks_dir ./makefile")
        if matrix_element.virt_matrix_element:
            os.system("ln -s ../BinothLHA.f ./BinothLHA.f")
        elif OLP!='MadLoop':
            os.system("ln -s ../BinothLHA_OLP.f ./BinothLHA.f")
        else:
            os.system("ln -s ../BinothLHA_user.f ./BinothLHA.f")

        os.chdir(os.path.pardir)

        # Register this P* directory in subproc.mg.
        filename = 'subproc.mg'
        files.append_to_file(filename,
                             self.write_subproc,
                             borndir)

        os.chdir(cwd)

        gen_infohtml.make_info_html_nlo(self.dir_path)

        return calls
719
720
721
722
        # NOTE(review): the enclosing "def" header line is missing from this
        # chunk of the listing -- confirm the signature upstream (expects
        # processes and history at least).
        """Create the default NLO run_card for the generated processes and
        write both run_card_default.dat and run_card.dat to the Cards
        directory."""

        run_card = banner_mod.RunCardNLO()

        # Tailor the defaults to the generated process.
        run_card.create_default_for_process(self.proc_characteristic,
                                            history,
                                            processes)

        run_card.write(pjoin(self.dir_path, 'Cards', 'run_card_default.dat'))
        run_card.write(pjoin(self.dir_path, 'Cards', 'run_card.dat'))
734
735
746
    def finalize(self, matrix_elements, history, mg5options, flaglist):
        """Finalize FKS directory by creating jpeg diagrams, html
        pages,proc_card_mg5.dat and madevent.tar.gz and create the MA5 card if
        necessary."""

        devnull = os.open(os.devnull, os.O_RDWR)
        # Sanity-check the configured lhapdf executable; a non-zero result
        # only triggers a warning, the run proceeds with built-in PDFs.
        try:
            res = misc.call([mg5options['lhapdf'], '--version'], \
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except Exception:
            res = 1
        if res != 0:
            logger.info('The value for lhapdf in the current configuration does not ' + \
                        'correspond to a valid executable.\nPlease set it correctly either in ' + \
                        'input/mg5_configuration or with "set lhapdf /path/to/lhapdf-config" ' + \
                        'and regenrate the process. \nTo avoid regeneration, edit the ' + \
                        ('%s/Cards/amcatnlo_configuration.txt file.\n' % self.dir_path ) + \
                        'Note that you can still compile and run aMC@NLO with the built-in PDFs\n')

        compiler_dict = {'fortran': mg5options['fortran_compiler'],
                         'cpp': mg5options['cpp_compiler'],
                         'f2py': mg5options['f2py_compiler']}

        if 'nojpeg' in flaglist:
            makejpg = False
        else:
            makejpg = True
        output_dependencies = mg5options['output_dependencies']

        self.proc_characteristic['grouped_matrix'] = False
        self.proc_characteristic['complex_mass_scheme'] = mg5options['complex_mass_scheme']

        # Extract the perturbation orders from the [...] part of the
        # generate command in the history.
        # NOTE(review): the pattern would be safer as a raw string r"\[(.*)\]".
        perturbation_order = []
        firstprocess = history.get('generate')
        order = re.findall("\[(.*)\]", firstprocess)
        if 'QED' in order[0]:
            perturbation_order.append('QED')
        if 'QCD' in order[0]:
            perturbation_order.append('QCD')
        self.proc_characteristic['perturbation_order'] = perturbation_order

        self.create_proc_charac()

        self.create_run_card(matrix_elements.get_processes(), history)

        # Model mass/width helper functions.
        filename = os.path.join(self.dir_path,'Source','MODEL','get_mass_width_fcts.f')
        makeinc = os.path.join(self.dir_path,'Source','MODEL','makeinc.inc')
        self.write_get_mass_width_file(writers.FortranWriter(filename), makeinc, self.model)

        # Global size includes for the whole Source tree.
        filename = os.path.join(self.dir_path,'Source','maxconfigs.inc')
        self.write_maxconfigs_file(writers.FortranWriter(filename),
                                   matrix_elements.get_max_configs())

        filename = os.path.join(self.dir_path,'Source','maxparticles.inc')
        self.write_maxparticles_file(writers.FortranWriter(filename),
                                     matrix_elements.get_max_particles())

        # Touch "done" so the web interface knows generation finished.
        os.system('touch %s/done' % os.path.join(self.dir_path,'SubProcesses'))

        fcompiler_chosen = self.set_fortran_compiler(compiler_dict)
        ccompiler_chosen = self.set_cpp_compiler(compiler_dict['cpp'])

        old_pos = os.getcwd()
        os.chdir(os.path.join(self.dir_path, 'SubProcesses'))
        P_dir_list = [proc for proc in os.listdir('.') if os.path.isdir(proc) and \
                      proc[0] == 'P']

        devnull = os.open(os.devnull, os.O_RDWR)

        # Render jpeg diagrams in every P* directory unless 'nojpeg' was set.
        if makejpg:
            logger.info("Generate jpeg diagrams")
            for Pdir in P_dir_list:
                os.chdir(Pdir)
                subprocess.call([os.path.join(old_pos, self.dir_path, 'bin', 'internal', 'gen_jpeg-pl')],
                                stdout = devnull)
                os.chdir(os.path.pardir)

        logger.info("Generate web pages")

        subprocess.call([os.path.join(old_pos, self.dir_path, 'bin', 'internal', 'gen_cardhtml-pl')], \
                        stdout = devnull)

        os.chdir(os.path.pardir)

        # Record the full MG5 command history as proc_card_mg5.dat.
        if os.path.isdir('Cards'):
            output_file = os.path.join('Cards', 'proc_card_mg5.dat')
            history.write(output_file)

        # Refresh the *_default.dat copies of the main cards.
        for card in ['run_card', 'FO_analyse_card', 'shower_card']:
            try:
                shutil.copy(pjoin(self.dir_path, 'Cards',
                                  card + '.dat'),
                            pjoin(self.dir_path, 'Cards',
                                  card + '_default.dat'))
            except IOError:
                logger.warning("Failed to copy " + card + ".dat to default")

        subprocess.call([os.path.join(old_pos, self.dir_path, 'bin', 'internal', 'gen_cardhtml-pl')],
                        stdout = devnull)

        # Repackage the tarball used by the web/cluster distribution.
        if os.path.exists(pjoin('SubProcesses', 'subproc.mg')):
            if os.path.exists('amcatnlo.tar.gz'):
                os.remove('amcatnlo.tar.gz')
            subprocess.call([os.path.join(old_pos, self.dir_path, 'bin', 'internal', 'make_amcatnlo_tar')],
                            stdout = devnull)

        subprocess.call([os.path.join(old_pos, self.dir_path, 'bin', 'internal', 'gen_cardhtml-pl')],
                        stdout = devnull)

        os.chdir(old_pos)

        # StdHEP is needed by the MC@NLO shower interface; pick where its
        # libraries come from according to 'output_dependencies'.
        base_compiler= ['FC=g77','FC=gfortran']

        StdHep_path = pjoin(MG5DIR, 'vendor', 'StdHEP')

        if output_dependencies == 'external':
            # Compile (once) in the central vendor directory and link.
            if not os.path.exists(pjoin(MG5DIR, 'vendor', 'StdHEP', 'lib', 'libstdhep.a')) or \
               not os.path.exists(pjoin(MG5DIR, 'vendor', 'StdHEP', 'lib', 'libFmcfio.a')):
                if 'FC' not in os.environ or not os.environ['FC']:
                    # Patch the StdHEP make_opts to use the chosen compiler.
                    path = os.path.join(StdHep_path, 'src', 'make_opts')
                    text = open(path).read()
                    for base in base_compiler:
                        text = text.replace(base,'FC=%s' % fcompiler_chosen)
                    open(path, 'w').writelines(text)

                logger.info('Compiling StdHEP. This has to be done only once.')
                misc.compile(cwd = pjoin(MG5DIR, 'vendor', 'StdHEP'))
                logger.info('Done.')

            files.ln(pjoin(StdHep_path, 'lib', 'libstdhep.a'), \
                     pjoin(self.dir_path, 'MCatNLO', 'lib'))
            files.ln(pjoin(StdHep_path, 'lib', 'libFmcfio.a'), \
                     pjoin(self.dir_path, 'MCatNLO', 'lib'))

        elif output_dependencies == 'internal':
            # Ship a private copy of StdHEP inside the output directory.
            StdHEP_internal_path = pjoin(self.dir_path,'Source','StdHEP')
            shutil.copytree(StdHep_path,StdHEP_internal_path, symlinks=True)

            linkfiles = ['libstdhep.a', 'libFmcfio.a']
            for file in linkfiles:
                ln(pjoin(os.path.pardir,os.path.pardir,'Source','StdHEP','lib',file),
                   os.path.join(self.dir_path, 'MCatNLO', 'lib'))
            if 'FC' not in os.environ or not os.environ['FC']:
                path = pjoin(StdHEP_internal_path, 'src', 'make_opts')
                text = open(path).read()
                for base in base_compiler:
                    text = text.replace(base,'FC=%s' % fcompiler_chosen)
                open(path, 'w').writelines(text)

            misc.compile(['clean'],cwd = StdHEP_internal_path)

        elif output_dependencies == 'environment_paths':
            # Trust pre-installed libraries found via the environment.
            libStdHep = misc.which_lib('libstdhep.a')
            libFmcfio = misc.which_lib('libFmcfio.a')
            if not libStdHep is None and not libFmcfio is None:
                logger.info('MG5_aMC is using StdHep installation found at %s.'%\
                            os.path.dirname(libStdHep))
                ln(pjoin(libStdHep),pjoin(self.dir_path, 'MCatNLO', 'lib'),abspath=True)
                ln(pjoin(libFmcfio),pjoin(self.dir_path, 'MCatNLO', 'lib'),abspath=True)
            else:
                raise InvalidCmd("Could not find the location of the files"+\
                                 " libstdhep.a and libFmcfio.a in you environment paths.")

        else:
            raise MadGraph5Error, 'output_dependencies option %s not recognized'\
                %output_dependencies

        # Optionally tailor a default MadAnalysis5 card to the processes.
        if 'madanalysis5_path' in self.opt and not \
           self.opt['madanalysis5_path'] is None and not self.proc_defs is None:

            processes = sum([me.get('processes') if not isinstance(me, str) else [] \
                             for me in matrix_elements.get('matrix_elements')],[])

            if len(processes)==0:
                processes = self.born_processes
            if len(processes)==0:
                logger.warning(
"""MG5aMC could not provide to Madanalysis5 the list of processes generated.
As a result, the default card will not be tailored to the process generated.
This typically happens when using the 'low_mem_multicore_nlo_generation' NLO generation mode.""")

            self.create_default_madanalysis5_cards(
                history, self.proc_defs, [processes,]*len(self.proc_defs),
                self.opt['madanalysis5_path'], pjoin(self.dir_path,'Cards'),
                levels =['hadron'])
968
969
970
971
972
        # NOTE(review): the enclosing "def" header line is missing from this
        # chunk of the listing -- confirm the signature upstream (expects
        # writer and matrix_element at least).
        """Writes the real_from_born_configs.inc file that contains
        the mapping to go for a given born configuration (that is used
        e.g. in the multi-channel phase-space integration to the
        corresponding real-emission diagram, i.e. the real emission
        diagram in which the combined ij is split in i_fks and
        j_fks."""
        lines = []
        lines2 = []
        max_links = 0
        born_me = matrix_element.born_me
        # One DATA statement per FKS configuration (1-based in Fortran).
        for iFKS, conf in enumerate(matrix_element.get_fks_info_list()):
            iFKS = iFKS+1
            links = conf['fks_info']['rb_links']
            max_links = max(max_links,len(links))
            # The born configurations are required to appear in order.
            for i,diags in enumerate(links):
                if not i == diags['born_conf']:
                    print links
                    raise MadGraph5Error, "born_conf should be canonically ordered"
            real_configs = ', '.join(['%d' % int(diags['real_conf']+1) for diags in links])
            lines.append("data (real_from_born_conf(irfbc,%d),irfbc=1,%d) /%s/" \
                         % (iFKS,len(links),real_configs))

        # LO-only case: no FKS configurations, so build an identity mapping
        # sized by the number of born diagrams with minimal vertex order.
        if not matrix_element.get_fks_info_list():
            base_diagrams = born_me.get('base_amplitude').get('diagrams')
            minvert = min([max([len(vert.get('legs')) for vert in \
                                diag.get('vertices')]) for diag in base_diagrams])

            for idiag, diag in enumerate(base_diagrams):
                if any([len(vert.get('legs')) > minvert for vert in
                        diag.get('vertices')]):
                    # Diagrams with higher-point vertices are not used as
                    # integration channels.
                    continue
                max_links = max_links + 1

            real_configs=', '.join(['%d' % i for i in range(1, max_links+1)])
            lines.append("data (real_from_born_conf(irfbc,%d),irfbc=1,%d) /%s/" \
                         % (1,max_links,real_configs))

        # Declarations must precede the DATA statements in the include file.
        lines2.append("integer irfbc")
        lines2.append("integer real_from_born_conf(%d,%d)" \
                      % (max_links, max(len(matrix_element.get_fks_info_list()),1)))

        writer.writelines(lines2+lines)
1020
        # NOTE(review): the enclosing "def" header line is missing from this
        # chunk of the listing -- confirm the signature upstream (expects
        # writer and matrix_element at least).
        """write a dummy file"""
        # Fixed placeholder size; only the declarations are emitted, no DATA.
        max_links = 10
        lines2 = []
        lines2.append("integer irfbc")
        lines2.append("integer real_from_born_conf(%d,%d)" \
                      % (max_links,len(matrix_element.get_fks_info_list())))

        writer.writelines(lines2)
1030
1031
        # NOTE(review): the enclosing "def" header line is missing from this
        # chunk of the listing -- confirm the signature upstream (expects
        # writer and amp_split_orders at least).
        """ write the include file with the information of the coupling power for the
        different entries in the amp_split array"""
        text = "integer iaso, amp_split_orders(%d, nsplitorders)\n" % len(amp_split_orders)

        # One DATA statement per amp_split entry (1-based in Fortran).
        for i, amp_orders in enumerate(amp_split_orders):
            text+= "data (amp_split_orders(%d, iaso), iaso=1,nsplitorders) / %s /\n" % \
                   (i + 1, ', '.join(['%d' % o for o in amp_orders]))

        writer.writelines(text)
1042
1043
1045 """writes the include file with the informations about coupling orders.
1046 In particular this file should contain the constraints requested by the user
1047 for all the orders which are split"""
1048
1049 born_orders = {}
1050 for ordd, val in matrix_element.born_me['processes'][0]['born_orders'].items():
1051
1052 born_orders[ordd] = 2 * val
1053
1054 nlo_orders = {}
1055 for ordd, val in matrix_element.born_me['processes'][0]['squared_orders'].items():
1056
1057 nlo_orders[ordd] = val
1058
1059 split_orders = \
1060 matrix_element.born_me['processes'][0]['split_orders']
1061
1062 pert_orders = \
1063 matrix_element.born_me['processes'][0]['perturbation_couplings']
1064
1065 max_born_orders = {}
1066 max_nlo_orders = {}
1067
1068 model = matrix_element.born_me['processes'][0]['model']
1069
1070
1071 if born_orders.keys() == ['WEIGHTED']:
1072
1073
1074 wgt_ord_max = born_orders['WEIGHTED']
1075 squared_orders, amp_orders = matrix_element.born_me.get_split_orders_mapping()
1076 for sq_order in squared_orders:
1077
1078
1079 ord_dict = {}
1080 assert len(sq_order) == len(split_orders)
1081 for o, v in zip(split_orders, list(sq_order)):
1082 ord_dict[o] = v
1083
1084 wgt = sum([v * model.get('order_hierarchy')[o] for \
1085 o, v in ord_dict.items()])
1086 if wgt > wgt_ord_max:
1087 continue
1088
1089 for o, v in ord_dict.items():
1090 try:
1091 max_born_orders[o] = max(max_born_orders[o], v)
1092 except KeyError:
1093 max_born_orders[o] = v
1094
1095 else:
1096 for o in [oo for oo in split_orders if oo != 'WEIGHTED']:
1097 try:
1098 max_born_orders[o] = born_orders[o]
1099 except KeyError:
1100
1101 max_born_orders[o] = 1000
1102 try:
1103 max_nlo_orders[o] = nlo_orders[o]
1104 except KeyError:
1105
1106 max_nlo_orders[o] = 1000
1107
1108
1109
1110 qcd_pos = -1
1111 qed_pos = -1
1112 if 'QCD' in split_orders:
1113 qcd_pos = split_orders.index('QCD') + 1
1114 if 'QED' in split_orders:
1115 qed_pos = split_orders.index('QED') + 1
1116
1117
1118
1119
1120
1121
1122
1123
1124 amp_split_orders = []
1125 squared_orders, amp_orders = matrix_element.born_me.get_split_orders_mapping()
1126 amp_split_size_born = len(squared_orders)
1127 amp_split_orders += squared_orders
1128
1129
1130 for realme in matrix_element.real_processes:
1131 squared_orders, amp_orders = realme.matrix_element.get_split_orders_mapping()
1132 for order in squared_orders:
1133 if not order in amp_split_orders:
1134 amp_split_orders.append(order)
1135
1136
1137
1138
1139
1140 try:
1141 squared_orders, amp_orders = matrix_element.virt_matrix_element.get_split_orders_mapping()
1142 squared_orders = [so[0] for so in squared_orders]
1143 for order in squared_orders:
1144 if not order in amp_split_orders:
1145 amp_split_orders.append(order)
1146 except AttributeError:
1147 pass
1148
1149 amp_split_size=len(amp_split_orders)
1150
1151 text = 'C The orders to be integrated for the Born and at NLO\n'
1152 text += 'integer nsplitorders\n'
1153 text += 'parameter (nsplitorders=%d)\n' % len(split_orders)
1154 text += 'character*3 ordernames(nsplitorders)\n'
1155 text += 'data ordernames / %s /\n' % ', '.join(['"%3s"' % o for o in split_orders])
1156 text += 'integer born_orders(nsplitorders), nlo_orders(nsplitorders)\n'
1157 text += 'C the order of the coupling orders is %s\n' % ', '.join(split_orders)
1158 text += 'data born_orders / %s /\n' % ', '.join([str(max_born_orders[o]) for o in split_orders])
1159 text += 'data nlo_orders / %s /\n' % ', '.join([str(max_nlo_orders[o]) for o in split_orders])
1160 text += 'C The position of the QCD /QED orders in the array\n'
1161 text += 'integer qcd_pos, qed_pos\n'
1162 text += 'C if = -1, then it is not in the split_orders\n'
1163 text += 'parameter (qcd_pos = %d)\n' % qcd_pos
1164 text += 'parameter (qed_pos = %d)\n' % qed_pos
1165 text += 'C this is to keep track of the various coupling combinations entering each ME\n'
1166 text += 'integer amp_split_size, amp_split_size_born\n'
1167 text += 'parameter (amp_split_size = %d)\n' % amp_split_size
1168 text += 'parameter (amp_split_size_born = %d) ! the first entries in amp_split are for the born\n' % amp_split_size_born
1169 text += 'double precision amp_split(amp_split_size)\n'
1170 text += 'double complex amp_split_cnt(amp_split_size,2,nsplitorders)\n'
1171 text += 'common /to_amp_split/amp_split, amp_split_cnt\n'
1172
1173 writer.writelines(text)
1174
1175 return amp_split_orders
1176
1177
1178
1179
1180
1181
1183 """Write the get_mass_width_file.f file for MG4.
1184 Also update the makeinc.inc file
1185 """
1186 mass_particles = [p for p in model['particles'] if p['mass'].lower() != 'zero']
1187 width_particles = [p for p in model['particles'] if p['width'].lower() != 'zero']
1188
1189 iflines_mass = ''
1190 iflines_width = ''
1191
1192 for i, part in enumerate(mass_particles):
1193 if i == 0:
1194 ifstring = 'if'
1195 else:
1196 ifstring = 'else if'
1197 if part['self_antipart']:
1198 iflines_mass += '%s (id.eq.%d) then\n' % \
1199 (ifstring, part.get_pdg_code())
1200 else:
1201 iflines_mass += '%s (id.eq.%d.or.id.eq.%d) then\n' % \
1202 (ifstring, part.get_pdg_code(), part.get_anti_pdg_code())
1203 iflines_mass += 'get_mass_from_id=abs(%s)\n' % part.get('mass')
1204
1205 if mass_particles:
1206 iflines_mass += 'else\n'
1207 else:
1208 iflines_mass = 'if (.true.) then\n'
1209
1210 for i, part in enumerate(width_particles):
1211 if i == 0:
1212 ifstring = 'if'
1213 else:
1214 ifstring = 'else if'
1215 if part['self_antipart']:
1216 iflines_width += '%s (id.eq.%d) then\n' % \
1217 (ifstring, part.get_pdg_code())
1218 else:
1219 iflines_width += '%s (id.eq.%d.or.id.eq.%d) then\n' % \
1220 (ifstring, part.get_pdg_code(), part.get_anti_pdg_code())
1221 iflines_width += 'get_width_from_id=abs(%s)\n' % part.get('width')
1222
1223 if width_particles:
1224 iflines_width += 'else\n'
1225 else:
1226 iflines_width = 'if (.true.) then\n'
1227
1228 replace_dict = {'iflines_mass' : iflines_mass,
1229 'iflines_width' : iflines_width}
1230
1231 file = open(os.path.join(_file_path, \
1232 'iolibs/template_files/get_mass_width_fcts.inc')).read()
1233 file = file % replace_dict
1234
1235
1236 writer.writelines(file)
1237
1238
1239 makeinc_content = open(makeinc).read()
1240 makeinc_content = makeinc_content.replace('MODEL = ', 'MODEL = get_mass_width_fcts.o ')
1241 open(makeinc, 'w').write(makeinc_content)
1242
1243 return
1244
1245
1247 """writes the declarations for the variables relevant for configs_and_props
1248 """
1249 lines = []
1250 lines.append("integer ifr,lmaxconfigs_used,max_branch_used")
1251 lines.append("parameter (lmaxconfigs_used=%4d)" % max_iconfig)
1252 lines.append("parameter (max_branch_used =%4d)" % -max_leg_number)
1253 lines.append("integer mapconfig_d(%3d,0:lmaxconfigs_used)" % nfksconfs)
1254 lines.append("integer iforest_d(%3d,2,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs)
1255 lines.append("integer sprop_d(%3d,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs)
1256 lines.append("integer tprid_d(%3d,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs)
1257 lines.append("double precision pmass_d(%3d,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs)
1258 lines.append("double precision pwidth_d(%3d,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs)
1259 lines.append("integer pow_d(%3d,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs)
1260
1261 writer.writelines(lines)
1262
1263
1265 """writes the configs_and_props_info.inc file that cointains
1266 all the (real-emission) configurations (IFOREST) as well as
1267 the masses and widths of intermediate particles"""
1268 lines = []
1269 lines.append("# C -> MAPCONFIG_D")
1270 lines.append("# F/D -> IFOREST_D")
1271 lines.append("# S -> SPROP_D")
1272 lines.append("# T -> TPRID_D")
1273 lines.append("# M -> PMASS_D/PWIDTH_D")
1274 lines.append("# P -> POW_D")
1275 lines2 = []
1276 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
1277
1278 max_iconfig=0
1279 max_leg_number=0
1280
1281
1282
1283
1284 for iFKS, conf in enumerate(matrix_element.get_fks_info_list()):
1285 iFKS=iFKS+1
1286 iconfig = 0
1287 s_and_t_channels = []
1288 mapconfigs = []
1289 fks_matrix_element=matrix_element.real_processes[conf['n_me'] - 1].matrix_element
1290 base_diagrams = fks_matrix_element.get('base_amplitude').get('diagrams')
1291 model = fks_matrix_element.get('base_amplitude').get('process').get('model')
1292 minvert = min([max([len(vert.get('legs')) for vert in \
1293 diag.get('vertices')]) for diag in base_diagrams])
1294
1295 lines.append("# ")
1296 lines.append("# nFKSprocess %d" % iFKS)
1297 for idiag, diag in enumerate(base_diagrams):
1298 if any([len(vert.get('legs')) > minvert for vert in
1299 diag.get('vertices')]):
1300
1301 continue
1302 iconfig = iconfig + 1
1303 helas_diag = fks_matrix_element.get('diagrams')[idiag]
1304 mapconfigs.append(helas_diag.get('number'))
1305 lines.append("# Diagram %d for nFKSprocess %d" % \
1306 (helas_diag.get('number'),iFKS))
1307
1308 lines.append("C %4d %4d %4d " % (iFKS,iconfig,
1309 helas_diag.get('number')))
1310
1311
1312
1313 schannels, tchannels = helas_diag.get('amplitudes')[0].\
1314 get_s_and_t_channels(ninitial, model, 990)
1315
1316 s_and_t_channels.append([schannels, tchannels])
1317
1318
1319 allchannels = schannels
1320 if len(tchannels) > 1:
1321
1322 allchannels = schannels + tchannels
1323
1324 for vert in allchannels:
1325 daughters = [leg.get('number') for leg in vert.get('legs')[:-1]]
1326 last_leg = vert.get('legs')[-1]
1327 lines.append("F %4d %4d %4d %4d" % \
1328 (iFKS,last_leg.get('number'), iconfig, len(daughters)))
1329 for d in daughters:
1330 lines.append("D %4d" % d)
1331 if vert in schannels:
1332 lines.append("S %4d %4d %4d %10d" % \
1333 (iFKS,last_leg.get('number'), iconfig,
1334 last_leg.get('id')))
1335 elif vert in tchannels[:-1]:
1336 lines.append("T %4d %4d %4d %10d" % \
1337 (iFKS,last_leg.get('number'), iconfig,
1338 abs(last_leg.get('id'))))
1339
1340
1341 max_leg_number = min(max_leg_number,last_leg.get('number'))
1342 max_iconfig = max(max_iconfig,iconfig)
1343
1344
1345 lines.append("# Number of configs for nFKSprocess %d" % iFKS)
1346 lines.append("C %4d %4d %4d" % (iFKS,0,iconfig))
1347
1348
1349 lines2.append("# ")
1350 particle_dict = fks_matrix_element.get('processes')[0].get('model').\
1351 get('particle_dict')
1352
1353 for iconf, configs in enumerate(s_and_t_channels):
1354 for vertex in configs[0] + configs[1][:-1]:
1355 leg = vertex.get('legs')[-1]
1356 if leg.get('id') not in particle_dict:
1357
1358 pow_part = 0
1359 else:
1360 particle = particle_dict[leg.get('id')]
1361
1362 pow_part = 1 + int(particle.is_boson())
1363
1364 lines2.append("M %4d %4d %4d %10d " % \
1365 (iFKS,leg.get('number'), iconf + 1, leg.get('id')))
1366 lines2.append("P %4d %4d %4d %4d " % \
1367 (iFKS,leg.get('number'), iconf + 1, pow_part))
1368
1369
1370
1371
1372 if not matrix_element.get_fks_info_list():
1373 born_me = matrix_element.born_me
1374
1375
1376
1377
1378 bornproc = born_me.get('processes')[0]
1379 colors = [l.get('color') for l in bornproc.get('legs')]
1380
1381 fks_i = len(colors)
1382
1383
1384 fks_j=1
1385 for cpos, col in enumerate(colors):
1386 if col != 1:
1387 fks_j = cpos+1
1388 fks_j_id = [l.get('id') for l in bornproc.get('legs')][cpos]
1389
1390
1391 if fks_j > ninitial:
1392 iFKS=1
1393 iconfig = 0
1394 s_and_t_channels = []
1395 mapconfigs = []
1396 base_diagrams = born_me.get('base_amplitude').get('diagrams')
1397 model = born_me.get('base_amplitude').get('process').get('model')
1398 minvert = min([max([len(vert.get('legs')) for vert in \
1399 diag.get('vertices')]) for diag in base_diagrams])
1400
1401 lines.append("# ")
1402 lines.append("# nFKSprocess %d" % iFKS)
1403 for idiag, diag in enumerate(base_diagrams):
1404 if any([len(vert.get('legs')) > minvert for vert in
1405 diag.get('vertices')]):
1406
1407 continue
1408 iconfig = iconfig + 1
1409 helas_diag = born_me.get('diagrams')[idiag]
1410 mapconfigs.append(helas_diag.get('number'))
1411 lines.append("# Diagram %d for nFKSprocess %d" % \
1412 (helas_diag.get('number'),iFKS))
1413
1414 lines.append("C %4d %4d %4d " % (iFKS,iconfig,
1415 helas_diag.get('number')))
1416
1417
1418
1419 schannels, tchannels = helas_diag.get('amplitudes')[0].\
1420 get_s_and_t_channels(ninitial, model, 990)
1421
1422 s_and_t_channels.append([schannels, tchannels])
1423
1424
1425 lines.append("F %4d %4d %4d %4d" % \
1426 (iFKS,-1,iconfig,2))
1427
1428 lines.append("D %4d" % nexternal)
1429 lines.append("D %4d" % fks_j)
1430 lines.append("S %4d %4d %4d %10d" % \
1431 (iFKS,-1, iconfig,fks_j_id))
1432
1433
1434
1435
1436 allchannels = schannels
1437 if len(tchannels) > 1:
1438
1439 allchannels = schannels + tchannels
1440
1441 for vert in allchannels:
1442 daughters = [leg.get('number') for leg in vert.get('legs')[:-1]]
1443 last_leg = vert.get('legs')[-1]
1444 lines.append("F %4d %4d %4d %4d" % \
1445 (iFKS,last_leg.get('number')-1, iconfig, len(daughters)))
1446
1447
1448 for i_dau in range(len(daughters)):
1449 if daughters[i_dau] < 0:
1450 daughters[i_dau] += -1
1451
1452 if fks_j in daughters:
1453 daughters[daughters.index(fks_j)] = -1
1454 for d in daughters:
1455 lines.append("D %4d" % d)
1456 if vert in schannels:
1457 lines.append("S %4d %4d %4d %10d" % \
1458 (iFKS,last_leg.get('number')-1, iconfig,
1459 last_leg.get('id')))
1460 elif vert in tchannels[:-1]:
1461 lines.append("T %4d %4d %4d %10d" % \
1462 (iFKS,last_leg.get('number')-1, iconfig,
1463 abs(last_leg.get('id'))))
1464
1465
1466 max_leg_number = min(max_leg_number,last_leg.get('number')-1)
1467 max_iconfig = max(max_iconfig,iconfig)
1468
1469
1470 lines.append("# Number of configs for nFKSprocess %d" % iFKS)
1471 lines.append("C %4d %4d %4d" % (iFKS,0,iconfig))
1472
1473
1474 lines2.append("# ")
1475 particle_dict = born_me.get('processes')[0].get('model').\
1476 get('particle_dict')
1477
1478 for iconf, configs in enumerate(s_and_t_channels):
1479 lines2.append("M %4d %4d %4d %10d " % \
1480 (iFKS,-1, iconf + 1, fks_j_id))
1481 pow_part = 1 + int(particle_dict[fks_j_id].is_boson())
1482 lines2.append("P %4d %4d %4d %4d " % \
1483 (iFKS,-1, iconf + 1, pow_part))
1484 for vertex in configs[0] + configs[1][:-1]:
1485 leg = vertex.get('legs')[-1]
1486 if leg.get('id') not in particle_dict:
1487
1488 pow_part = 0
1489 else:
1490 particle = particle_dict[leg.get('id')]
1491
1492 pow_part = 1 + int(particle.is_boson())
1493
1494 lines2.append("M %4d %4d %4d %10d " % \
1495 (iFKS,leg.get('number')-1, iconf + 1, leg.get('id')))
1496 lines2.append("P %4d %4d %4d %4d " % \
1497 (iFKS,leg.get('number')-1, iconf + 1, pow_part))
1498
1499
1500 open(filename,'w').write('\n'.join(lines+lines2))
1501
1502 return max_iconfig, max_leg_number
1503
1504
1507 """writes the declarations for the variables relevant for leshouche_info
1508 """
1509 lines = []
1510 lines.append('integer maxproc_used, maxflow_used')
1511 lines.append('parameter (maxproc_used = %d)' % maxproc)
1512 lines.append('parameter (maxflow_used = %d)' % maxflow)
1513 lines.append('integer idup_d(%d,%d,maxproc_used)' % (nfksconfs, nexternal))
1514 lines.append('integer mothup_d(%d,%d,%d,maxproc_used)' % (nfksconfs, 2, nexternal))
1515 lines.append('integer icolup_d(%d,%d,%d,maxflow_used)' % (nfksconfs, 2, nexternal))
1516 lines.append('integer niprocs_d(%d)' % (nfksconfs))
1517
1518 writer.writelines(lines)
1519
1520
def write_genps(self, writer, maxproc, ngraphs, ncolor, maxflow, fortran_model):
    """Write the genps.inc include file.

    The file pulls in the maxparticles/maxconfigs includes and declares
    the four integer parameters passed in.  ``fortran_model`` is accepted
    for interface uniformity with the other writers but is not used here.
    """
    header = ["include 'maxparticles.inc'",
              "include 'maxconfigs.inc'",
              "integer maxproc,ngraphs,ncolor,maxflow"]
    params = "parameter (maxproc=%d,ngraphs=%d,ncolor=%d,maxflow=%d)" % \
             (maxproc, ngraphs, ncolor, maxflow)
    writer.writelines(header + [params])
1531
1532
1534 """writes the leshouche_info.inc file which contains
1535 the LHA informations for all the real emission processes
1536 """
1537 lines = []
1538 lines.append("# I -> IDUP_D")
1539 lines.append("# M -> MOTHUP_D")
1540 lines.append("# C -> ICOLUP_D")
1541 nfksconfs = len(matrix_element.get_fks_info_list())
1542 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
1543
1544 maxproc = 0
1545 maxflow = 0
1546 for i, conf in enumerate(matrix_element.get_fks_info_list()):
1547 (newlines, nprocs, nflows) = self.get_leshouche_lines(
1548 matrix_element.real_processes[conf['n_me'] - 1].matrix_element, i + 1)
1549 lines.extend(newlines)
1550 maxproc = max(maxproc, nprocs)
1551 maxflow = max(maxflow, nflows)
1552
1553
1554 if not matrix_element.get_fks_info_list():
1555 (newlines, nprocs, nflows) = self.get_leshouche_lines_dummy(matrix_element.born_me, 1)
1556 lines.extend(newlines)
1557
1558
1559 open(filename,'w').write('\n'.join(lines))
1560
1561 return nfksconfs, maxproc, maxflow, nexternal
1562
1563
def write_real_wrappers(self, writer_me, writer_lum, matrix_element, sqsolist, fortran_model):
    """Write the two Fortran dispatch wrappers: smatrix_real, which calls
    the real-emission matrix element selected by the current nFKSprocess,
    and dlum, which calls the corresponding parton luminosity.

    When the process has no real emissions (LOonly mode) trivial bodies
    are written instead.  Returns 0.
    """
    # common headers of the two wrapper routines
    me_text = ("subroutine smatrix_real(p, wgt)\n"
               "implicit none\n"
               "include 'nexternal.inc'\n"
               "double precision p(0:3, nexternal)\n"
               "double precision wgt\n"
               "integer nfksprocess\n"
               "common/c_nfksprocess/nfksprocess\n")
    lum_text = ("\n\ndouble precision function dlum()\n"
                "implicit none\n"
                "integer nfksprocess\n"
                "common/c_nfksprocess/nfksprocess\n")

    if matrix_element.real_processes:
        # one if/else-if branch per FKS configuration
        for idx, info in enumerate(matrix_element.get_fks_info_list()):
            subst = {'n': idx + 1, 'n_me': info['n_me']}
            me_text += ("if (nfksprocess.eq.%(n)d) then\n"
                        "call smatrix%(n_me)d(p, wgt)\n"
                        "else") % subst
            lum_text += ("if (nfksprocess.eq.%(n)d) then\n"
                         "call dlum_%(n_me)d(dlum)\n"
                         "else") % subst
        # final else branch: invalid nfksprocess aborts the run
        me_text += ("\n"
                    "write(*,*) 'ERROR: invalid n in real_matrix :', nfksprocess\n"
                    "stop\n endif\n"
                    "return \n end\n")
        lum_text += ("\n"
                     "write(*,*) 'ERROR: invalid n in dlum :', nfksprocess\n stop\n endif\n"
                     "return \nend\n")
    else:
        # LOonly: no real emission, write trivial fall-back bodies
        me_text += "\nwgt=0d0\nreturn\nend\n"
        lum_text += "\ncall dlum_0(dlum)\nreturn\nend\n"

    # write the wrappers
    writer_me.writelines(me_text)
    writer_lum.writelines(lum_text)
    return 0
1627
1628
1630 """Create the ps files containing the feynman diagrams for the born process,
1631 as well as for all the real emission processes"""
1632
1633 filename = 'born.ps'
1634 plot = draw.MultiEpsDiagramDrawer(
1635 matrix_element.born_me.get('base_amplitude').get('diagrams'),
1636 filename,
1637 model=matrix_element.born_me.get('processes')[0].get('model'),
1638 amplitude=True, diagram_type='born')
1639 plot.draw()
1640
1641 for n, fksreal in enumerate(matrix_element.real_processes):
1642 filename = 'matrix_%d.ps' % (n + 1)
1643 plot = draw.MultiEpsDiagramDrawer(fksreal.matrix_element.\
1644 get('base_amplitude').get('diagrams'),
1645 filename,
1646 model=fksreal.matrix_element.\
1647 get('processes')[0].get('model'),
1648 amplitude=True, diagram_type='real')
1649 plot.draw()
1650
1651
1653 """writes the matrix_i.f files which contain the real matrix elements"""
1654
1655 sqsorders_list = []
1656 for n, fksreal in enumerate(matrix_element.real_processes):
1657 filename = 'matrix_%d.f' % (n + 1)
1658 ncalls, ncolors, nsplitorders, nsqsplitorders = \
1659 self.write_split_me_fks(\
1660 writers.FortranWriter(filename),
1661 fksreal.matrix_element,
1662 fortran_model, 'real', "%d" % (n+1))
1663 sqsorders_list.append(nsqsplitorders)
1664 return sqsorders_list
1665
1666
1667
1669 """write a wrapper for the extra born counterterms that may be
1670 present e.g. if the process has gluon at the born
1671 """
1672
1673 replace_dict = {'ncnt': max(len(cnt_me_list),1)}
1674
1675
1676
1677 if not cnt_me_list:
1678 replace_dict['cnt_charge_lines'] = \
1679 "data (cnt_charge(1,i), i=1,nexternalB) / nexternalB * 0d0 /"
1680 replace_dict['cnt_color_lines'] = \
1681 "data (cnt_color(1,i), i=1,nexternalB) / nexternalB * 1 /"
1682 replace_dict['cnt_pdg_lines'] = \
1683 "data (cnt_pdg(1,i), i=1,nexternalB) / nexternalB * 0 /"
1684
1685 replace_dict['iflines'] = ''
1686
1687 else:
1688 iflines = ''
1689 cnt_charge_lines = ''
1690 cnt_color_lines = ''
1691 cnt_pdg_lines = ''
1692
1693 for i, cnt in enumerate(cnt_me_list):
1694 icnt = i+1
1695 if not iflines:
1696 iflines = \
1697 'if (icnt.eq.%d) then\n call sborn_cnt%d(p,cnts)\n' % (icnt, icnt)
1698 else:
1699 iflines += \
1700 'else if (icnt.eq.%d) then\n call sborn_cnt%d(p,cnts)\n' % (icnt, icnt)
1701
1702 cnt_charge_lines += 'data (cnt_charge(%d,i), i=1,nexternalB) / %s /\n' % \
1703 (icnt, ', '.join(['%19.15fd0' % l['charge'] for l in cnt['processes'][0]['legs']]))
1704 cnt_color_lines += 'data (cnt_color(%d,i), i=1,nexternalB) / %s /\n' % \
1705 (icnt, ', '.join(['%d' % l['color'] for l in cnt['processes'][0]['legs']]))
1706 cnt_pdg_lines += 'data (cnt_pdg(%d,i), i=1,nexternalB) / %s /\n' % \
1707 (icnt, ', '.join(['%d' % l['id'] for l in cnt['processes'][0]['legs']]))
1708
1709 iflines += 'endif\n'
1710
1711 replace_dict['iflines'] = iflines
1712 replace_dict['cnt_color_lines'] = cnt_color_lines
1713 replace_dict['cnt_charge_lines'] = cnt_charge_lines
1714 replace_dict['cnt_pdg_lines'] = cnt_pdg_lines
1715
1716 file = open(pjoin(_file_path, \
1717 'iolibs/template_files/extra_cnt_wrapper_fks.inc')).read()
1718
1719 file = file % replace_dict
1720
1721
1722 writer.writelines(file)
1723
1724
1725
1726
1727
1728
def write_split_me_fks(self, writer, matrix_element, fortran_model,
                       proc_type, proc_prefix='', start_dict=None):
    """Export a matrix element using the split_order format.

    proc_type is either 'born', 'bhel', 'real' or 'cnt' and selects the
    Fortran template to be used; proc_prefix is the label appended to the
    routine names; start_dict contains additional infos to be put in
    replace_dict before the template substitution.

    Returns 0 when the matrix element has no processes/diagrams, otherwise
    the tuple (number of helas calls, ncolor, nAmpSplitOrders,
    nSqAmpSplitOrders).
    """
    # nothing to write for an empty matrix element
    if not matrix_element.get('processes') or \
       not matrix_element.get('diagrams'):
        return 0

    if not isinstance(writer, writers.FortranWriter):
        raise writers.FortranWriter.FortranWriterError(\
            "writer not FortranWriter")

    # dict.has_key is Python-2 only; the 'in' operator is equivalent
    # and also works on Python 3
    if 'sa_symmetry' not in self.opt:
        self.opt['sa_symmetry'] = False

    # Set lowercase/uppercase Fortran code
    writers.FortranWriter.downcase = False

    replace_dict = {'global_variable': '', 'amp2_lines': ''}
    if proc_prefix:
        replace_dict['proc_prefix'] = proc_prefix

    # extra infos requested by the caller.  NB: start_dict defaults to
    # None (not {}) so that no mutable default is shared between calls
    if start_dict:
        for k, v in start_dict.items():
            replace_dict[k] = v

    # Extract the helas calls
    helas_calls = fortran_model.get_matrix_element_calls(\
                                                    matrix_element)
    replace_dict['helas_calls'] = "\n".join(helas_calls)

    # Extract version number and date from VERSION file
    info_lines = self.get_mg5_info_lines()
    replace_dict['info_lines'] = info_lines

    # Set the size of the wavefunctions: bigger if the model contains
    # spin-3/2 (4) or spin-2 (5) particles
    if not self.model or any([p.get('spin') in [4, 5] for p in \
                              self.model.get('particles') if p]):
        replace_dict['wavefunctionsize'] = 20
    else:
        replace_dict['wavefunctionsize'] = 8

    # Extract process info lines
    process_lines = self.get_process_info_lines(matrix_element)
    replace_dict['process_lines'] = process_lines

    # Extract number of external particles
    (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
    replace_dict['nexternal'] = nexternal

    # Extract the number of helicity combinations
    ncomb = matrix_element.get_helicity_combinations()
    replace_dict['ncomb'] = ncomb

    # Extract helicity lines
    helicity_lines = self.get_helicity_lines(matrix_element)
    replace_dict['helicity_lines'] = helicity_lines

    # Extract the denominator (symmetry/averaging) factor line
    replace_dict['den_factor_line'] = self.get_den_factor_line(matrix_element)

    # Extract ngraphs
    ngraphs = matrix_element.get_number_of_amplitudes()
    replace_dict['ngraphs'] = ngraphs

    # Extract nwavefuncs
    nwavefuncs = matrix_element.get_number_of_wavefunctions()
    replace_dict['nwavefuncs'] = nwavefuncs

    # Extract ncolor (at least 1 even for a trivial color basis)
    ncolor = max(1, len(matrix_element.get('color_basis')))
    replace_dict['ncolor'] = ncolor

    replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor()

    # Extract color data lines
    color_data_lines = self.get_color_data_lines(matrix_element)
    replace_dict['color_data_lines'] = "\n".join(color_data_lines)

    if self.opt['export_format'] == 'standalone_msP':
        # For MadSpin the AMP2 array must be exposed
        amp2_lines = self.get_amp2_lines(matrix_element, [])
        replace_dict['amp2_lines'] = '\n'.join(amp2_lines)
        replace_dict['global_variable'] = \
            " Double Precision amp2(NGRAPHS)\n common/to_amps/ amp2\n"

    # JAMP definitions; the form depends on whether split orders are used
    split_orders = matrix_element.get('processes')[0].get('split_orders')
    if len(split_orders) == 0:
        # NOTE(review): in this branch 'nAmpSplitOrders' is never set, so
        # the return below raises KeyError; FKS MEs always carry split
        # orders in practice — confirm before relying on this path
        replace_dict['nSplitOrders'] = ''
        jamp_lines = self.get_JAMP_lines(matrix_element)
    else:
        split_orders_name = matrix_element['processes'][0]['split_orders']
        squared_orders, amp_orders = matrix_element.get_split_orders_mapping()
        replace_dict['nAmpSplitOrders'] = len(amp_orders)
        replace_dict['nSqAmpSplitOrders'] = len(squared_orders)
        replace_dict['nSplitOrders'] = len(split_orders)
        amp_so = self.get_split_orders_lines(
            [amp_order[0] for amp_order in amp_orders], 'AMPSPLITORDERS')
        sqamp_so = self.get_split_orders_lines(squared_orders, 'SQSPLITORDERS')
        replace_dict['ampsplitorders'] = '\n'.join(amp_so)
        # add a comment line documenting the order of the couplings
        replace_dict['sqsplitorders'] = \
            'C the values listed below are for %s\n' % ', '.join(split_orders_name)
        replace_dict['sqsplitorders'] += '\n'.join(sqamp_so)
        jamp_lines = self.get_JAMP_lines_split_order(\
            matrix_element, amp_orders, split_order_names=split_orders)

    replace_dict['jamp_lines'] = '\n'.join(jamp_lines)

    # pick the template from proc_type; fail with a clear error for an
    # unknown type instead of the NameError the old if/elif chain gave
    template_by_type = {
        'born': 'bornmatrix_splitorders_fks.inc',
        'bhel': 'born_hel_splitorders_fks.inc',
        'real': 'realmatrix_splitorders_fks.inc',
        'cnt': 'born_cnt_splitorders_fks.inc'}
    try:
        template_name = template_by_type[proc_type]
    except KeyError:
        raise fks_common.FKSProcessError(\
            'Unknown proc_type in write_split_me_fks: %s' % proc_type)
    # 'text' instead of 'file': do not shadow the builtin
    text = open(pjoin(_file_path,
                'iolibs/template_files/%s' % template_name)).read()

    text = text % replace_dict

    # Write the file
    writer.writelines(text)

    # len(filter(...)) works only on Python 2 (filter returns an iterator
    # on Python 3); count the non-comment calls with a comprehension
    ncalls = len([call for call in helas_calls if call.find('#') != 0])
    return ncalls, ncolor, \
        replace_dict['nAmpSplitOrders'], replace_dict['nSqAmpSplitOrders']
1862
1863
1865 """writes the parton_lum_i.f files which contain the real matrix elements.
1866 If no real emission existst, write the one for the born"""
1867
1868 if matrix_element.real_processes:
1869 for n, fksreal in enumerate(matrix_element.real_processes):
1870 filename = 'parton_lum_%d.f' % (n + 1)
1871 self.write_pdf_file(writers.FortranWriter(filename),
1872 fksreal.matrix_element, n + 1,
1873 fortran_model)
1874 else:
1875 filename = 'parton_lum_0.f'
1876 self.write_pdf_file(writers.FortranWriter(filename),
1877 matrix_element.born_me, 0,
1878 fortran_model)
1879
1880
1882 """generates the files needed for the born amplitude in the P* directory, which will
1883 be needed by the P* directories"""
1884 pathdir = os.getcwd()
1885
1886 born_me = matrix_element.born_me
1887
1888
1889 filename = 'born_conf.inc'
1890 nconfigs, mapconfigs, s_and_t_channels = \
1891 self.write_born_conf_file(
1892 writers.FortranWriter(filename),
1893 born_me, fortran_model)
1894
1895 filename = 'born_props.inc'
1896 self.write_born_props_file(
1897 writers.FortranWriter(filename),
1898 born_me, s_and_t_channels, fortran_model)
1899
1900 filename = 'born_leshouche.inc'
1901 nflows = self.write_born_leshouche_file(writers.FortranWriter(filename),
1902 born_me, fortran_model)
1903
1904 filename = 'born_nhel.inc'
1905 self.write_born_nhel_file(writers.FortranWriter(filename),
1906 born_me, nflows, fortran_model)
1907
1908 filename = 'born_ngraphs.inc'
1909 self.write_ngraphs_file(writers.FortranWriter(filename), nconfigs)
1910
1911 filename = 'ncombs.inc'
1912 self.write_ncombs_file(writers.FortranWriter(filename),
1913 born_me, fortran_model)
1914
1915 filename = 'born_coloramps.inc'
1916 self.write_coloramps_file(writers.FortranWriter(filename),
1917 mapconfigs, born_me, fortran_model)
1918
1919
1920 sqsorders_list = []
1921 filename = 'born.f'
1922
1923 born_dict = {}
1924 born_dict['nconfs'] = max(len(matrix_element.get_fks_info_list()),1)
1925
1926 den_factor_lines = self.get_den_factor_lines(matrix_element)
1927 born_dict['den_factor_lines'] = '\n'.join(den_factor_lines)
1928
1929 ij_lines = self.get_ij_lines(matrix_element)
1930 born_dict['ij_lines'] = '\n'.join(ij_lines)
1931
1932
1933 if not matrix_element.real_processes:
1934 born_dict['skip_amp_cnt'] = 'goto 999 ! LOonly, no need to compute amp_split_cnt'
1935 else:
1936 born_dict['skip_amp_cnt'] = ''
1937
1938 calls_born, ncolor_born, norders, nsqorders = \
1939 self.write_split_me_fks(writers.FortranWriter(filename),
1940 born_me, fortran_model, 'born', '',
1941 start_dict = born_dict)
1942
1943 filename = 'born_maxamps.inc'
1944 maxamps = len(matrix_element.get('diagrams'))
1945 maxflows = ncolor_born
1946 self.write_maxamps_file(writers.FortranWriter(filename),
1947 maxamps,
1948 maxflows,
1949 max([len(matrix_element.get('processes')) for me in \
1950 matrix_element.born_me]),1)
1951
1952
1953
1954
1955 filename = 'born_hel.f'
1956 calls_born, ncolor_born, norders, nsqorders = \
1957 self.write_split_me_fks(writers.FortranWriter(filename),
1958 born_me, fortran_model, 'bhel', '',
1959 start_dict = born_dict)
1960
1961 sqsorders_list.append(nsqorders)
1962
1963 self.color_link_files = []
1964 for j in range(len(matrix_element.color_links)):
1965 filename = 'b_sf_%3.3d.f' % (j + 1)
1966 self.color_link_files.append(filename)
1967 self.write_b_sf_fks(writers.FortranWriter(filename),
1968 matrix_element, j,
1969 fortran_model)
1970
1971
1972 filename = 'sborn_sf.f'
1973 self.write_sborn_sf(writers.FortranWriter(filename),
1974 matrix_element,
1975 nsqorders,
1976 fortran_model)
1977
1978
1979
1981 """Generates the library for computing the loop matrix elements
1982 necessary for this process using the OLP specified."""
1983
1984
1985 virtual_path = pjoin(export_path,'OLP_virtuals')
1986 if not os.path.exists(virtual_path):
1987 os.makedirs(virtual_path)
1988 filename = os.path.join(virtual_path,'OLE_order.lh')
1989 self.write_lh_order(filename, process_list, OLP)
1990
1991 fail_msg='Generation of the virtuals with %s failed.\n'%OLP+\
1992 'Please check the virt_generation.log file in %s.'\
1993 %str(pjoin(virtual_path,'virt_generation.log'))
1994
1995
1996 if OLP=='GoSam':
1997 cp(pjoin(self.mgme_dir,'Template','loop_material','OLP_specifics',
1998 'GoSam','makevirt'),pjoin(virtual_path,'makevirt'))
1999 cp(pjoin(self.mgme_dir,'Template','loop_material','OLP_specifics',
2000 'GoSam','gosam.rc'),pjoin(virtual_path,'gosam.rc'))
2001 ln(pjoin(export_path,'Cards','param_card.dat'),virtual_path)
2002
2003 logger.info('Generating the loop matrix elements with %s...'%OLP)
2004 virt_generation_log = \
2005 open(pjoin(virtual_path,'virt_generation.log'), 'w')
2006 retcode = subprocess.call(['./makevirt'],cwd=virtual_path,
2007 stdout=virt_generation_log, stderr=virt_generation_log)
2008 virt_generation_log.close()
2009
2010 possible_other_extensions = ['so','dylib']
2011 shared_lib_ext='so'
2012 for ext in possible_other_extensions:
2013 if os.path.isfile(pjoin(virtual_path,'Virtuals','lib',
2014 'libgolem_olp.'+ext)):
2015 shared_lib_ext = ext
2016
2017
2018 files_to_check = ['olp_module.mod',str(pjoin('lib',
2019 'libgolem_olp.'+shared_lib_ext))]
2020 if retcode != 0 or any([not os.path.exists(pjoin(virtual_path,
2021 'Virtuals',f)) for f in files_to_check]):
2022 raise fks_common.FKSProcessError(fail_msg)
2023
2024 ln(pjoin(virtual_path,'Virtuals','lib','libgolem_olp.'+shared_lib_ext),
2025 pjoin(export_path,'lib'))
2026
2027
2028 make_opts_content=open(pjoin(export_path,'Source','make_opts')).read()
2029 make_opts=open(pjoin(export_path,'Source','make_opts'),'w')
2030 if OLP=='GoSam':
2031 if platform.system().lower()=='darwin':
2032
2033
2034 make_opts_content=make_opts_content.replace('libOLP=',
2035 'libOLP=-Wl,-lgolem_olp')
2036 else:
2037
2038
2039
2040
2041
2042
2043
2044 make_opts_content=make_opts_content.replace('libOLP=',
2045 'libOLP=-Wl,-rpath='+str(pjoin(export_path,'lib'))+' -lgolem_olp')
2046
2047
2048 make_opts.write(make_opts_content)
2049 make_opts.close()
2050
2051
2052
2053
2054
2055 proc_to_label = self.parse_contract_file(
2056 pjoin(virtual_path,'OLE_order.olc'))
2057
2058 self.write_BinothLHA_inc(process_list,proc_to_label,\
2059 pjoin(export_path,'SubProcesses'))
2060
2061
2062 ln(pjoin(virtual_path,'OLE_order.olc'),pjoin(export_path,'SubProcesses'))
2063
2065 """ Write the file Binoth_proc.inc in each SubProcess directory so as
2066 to provide the right process_label to use in the OLP call to get the
2067 loop matrix element evaluation. The proc_to_label is the dictionary of
2068 the format of the one returned by the function parse_contract_file."""
2069
2070 for proc in processes:
2071 name = "P%s"%proc.shell_string()
2072 proc_pdgs=(tuple([leg.get('id') for leg in proc.get('legs') if \
2073 not leg.get('state')]),
2074 tuple([leg.get('id') for leg in proc.get('legs') if \
2075 leg.get('state')]))
2076 incFile = open(pjoin(SubProcPath, name,'Binoth_proc.inc'),'w')
2077 try:
2078 incFile.write(
2079 """ INTEGER PROC_LABEL
2080 PARAMETER (PROC_LABEL=%d)"""%(proc_to_label[proc_pdgs]))
2081 except KeyError:
2082 raise fks_common.FKSProcessError('Could not found the target'+\
2083 ' process %s > %s in '%(str(proc_pdgs[0]),str(proc_pdgs[1]))+\
2084 ' the proc_to_label argument in write_BinothLHA_inc.')
2085 incFile.close()
2086
2088 """ Parses the BLHA contract file, make sure all parameters could be
2089 understood by the OLP and return a mapping of the processes (characterized
2090 by the pdg's of the initial and final state particles) to their process
2091 label. The format of the mapping is {((in_pdgs),(out_pdgs)):proc_label}.
2092 """
2093
2094 proc_def_to_label = {}
2095
2096 if not os.path.exists(contract_file_path):
2097 raise fks_common.FKSProcessError('Could not find the contract file'+\
2098 ' OLE_order.olc in %s.'%str(contract_file_path))
2099
2100 comment_re=re.compile(r"^\s*#")
2101 proc_def_re=re.compile(
2102 r"^(?P<in_pdgs>(\s*-?\d+\s*)+)->(?P<out_pdgs>(\s*-?\d+\s*)+)\|"+
2103 r"\s*(?P<proc_class>\d+)\s*(?P<proc_label>\d+)\s*$")
2104 line_OK_re=re.compile(r"^.*\|\s*OK")
2105 for line in file(contract_file_path):
2106
2107 if not comment_re.match(line) is None:
2108 continue
2109
2110 proc_def = proc_def_re.match(line)
2111 if not proc_def is None:
2112 if int(proc_def.group('proc_class'))!=1:
2113 raise fks_common.FKSProcessError(
2114 'aMCatNLO can only handle loop processes generated by the OLP which have only '+\
2115 ' process class attribute. Found %s instead in: \n%s'\
2116 %(proc_def.group('proc_class'),line))
2117 in_pdgs=tuple([int(in_pdg) for in_pdg in \
2118 proc_def.group('in_pdgs').split()])
2119 out_pdgs=tuple([int(out_pdg) for out_pdg in \
2120 proc_def.group('out_pdgs').split()])
2121 proc_def_to_label[(in_pdgs,out_pdgs)]=\
2122 int(proc_def.group('proc_label'))
2123 continue
2124
2125 if line_OK_re.match(line) is None:
2126 raise fks_common.FKSProcessError(
2127 'The OLP could not process the following line: \n%s'%line)
2128
2129 return proc_def_to_label
2130
2131
2133 """writes the V**** directory inside the P**** directories specified in
2134 dir_name"""
2135
2136 cwd = os.getcwd()
2137
2138 matrix_element = loop_matrix_element
2139
2140
2141 dirpath = os.path.join(dir_name, 'MadLoop5_resources')
2142 try:
2143 os.mkdir(dirpath)
2144 except os.error as error:
2145 logger.warning(error.strerror + " " + dirpath)
2146
2147
2148 name = "V%s" % matrix_element.get('processes')[0].shell_string()
2149 dirpath = os.path.join(dir_name, name)
2150
2151 try:
2152 os.mkdir(dirpath)
2153 except os.error as error:
2154 logger.warning(error.strerror + " " + dirpath)
2155
2156 try:
2157 os.chdir(dirpath)
2158 except os.error:
2159 logger.error('Could not cd to directory %s' % dirpath)
2160 return 0
2161
2162 logger.info('Creating files in directory %s' % name)
2163
2164
2165 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
2166
2167 calls=self.write_loop_matrix_element_v4(None,matrix_element,fortran_model)
2168
2169 filename = 'born_matrix.f'
2170 calls = self.write_bornmatrix(
2171 writers.FortranWriter(filename),
2172 matrix_element,
2173 fortran_model)
2174
2175 filename = 'nexternal.inc'
2176 self.write_nexternal_file(writers.FortranWriter(filename),
2177 nexternal, ninitial)
2178
2179 filename = 'pmass.inc'
2180 self.write_pmass_file(writers.FortranWriter(filename),
2181 matrix_element)
2182
2183 filename = 'ngraphs.inc'
2184 self.write_ngraphs_file(writers.FortranWriter(filename),
2185 len(matrix_element.get_all_amplitudes()))
2186
2187 filename = "loop_matrix.ps"
2188 plot = draw.MultiEpsDiagramDrawer(base_objects.DiagramList(
2189 matrix_element.get('base_amplitude').get('loop_diagrams')[:1000]),
2190 filename,
2191 model=matrix_element.get('processes')[0].get('model'),
2192 amplitude='')
2193 logger.info("Drawing loop Feynman diagrams for " + \
2194 matrix_element.get('processes')[0].nice_string(print_weighted=False))
2195 plot.draw()
2196
2197 filename = "born_matrix.ps"
2198 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\
2199 get('born_diagrams'),filename,model=matrix_element.get('processes')[0].\
2200 get('model'),amplitude='')
2201 logger.info("Generating born Feynman diagrams for " + \
2202 matrix_element.get('processes')[0].nice_string(print_weighted=False))
2203 plot.draw()
2204
2205
2206
2207
2208 self.write_global_specs(matrix_element, output_path=pjoin(dirpath,'global_specs.inc'))
2209 open('unique_id.inc','w').write(
2210 """ integer UNIQUE_ID
2211 parameter(UNIQUE_ID=1)""")
2212
2213 linkfiles = ['coupl.inc', 'mp_coupl.inc', 'mp_coupl_same_name.inc',
2214 'cts_mprec.h', 'cts_mpc.h', 'MadLoopParamReader.f',
2215 'MadLoopCommons.f','MadLoopParams.inc']
2216
2217
2218 ln(pjoin(os.path.pardir,os.path.pardir,'MadLoopParams.dat'),
2219 pjoin('..','MadLoop5_resources'))
2220
2221 for file in linkfiles:
2222 ln('../../%s' % file)
2223
2224 os.system("ln -s ../../makefile_loop makefile")
2225
2226 linkfiles = ['mpmodule.mod']
2227
2228 for file in linkfiles:
2229 ln('../../../lib/%s' % file)
2230
2231 linkfiles = ['coef_specs.inc']
2232
2233 for file in linkfiles:
2234 ln('../../../Source/DHELAS/%s' % file)
2235
2236
2237 os.chdir(cwd)
2238
2239 if not calls:
2240 calls = 0
2241 return calls
2242
2243
2244
2245
2246
2247
2249 """Creates the OLE_order.lh file. This function should be edited according
2250 to the OLP which is used. For now it is generic."""
2251
2252
2253 if len(process_list)==0:
2254 raise fks_common.FKSProcessError('No matrix elements provided to '+\
2255 'the function write_lh_order.')
2256 return
2257
2258
2259
2260 orders = process_list[0].get('orders')
2261 if not orders:
2262 orders = {o : v / 2 for (o, v) in process_list[0].get('squared_orders').items()}
2263 if 'QED' in orders.keys() and 'QCD' in orders.keys():
2264 QED=orders['QED']
2265 QCD=orders['QCD']
2266 elif 'QED' in orders.keys():
2267 QED=orders['QED']
2268 QCD=0
2269 elif 'QCD' in orders.keys():
2270 QED=0
2271 QCD=orders['QCD']
2272 else:
2273 QED, QCD = fks_common.get_qed_qcd_orders_from_weighted(\
2274 len(process_list[0].get('legs')),
2275 orders['WEIGHTED'])
2276
2277 replace_dict = {}
2278 replace_dict['mesq'] = 'CHaveraged'
2279 replace_dict['corr'] = ' '.join(process_list[0].\
2280 get('perturbation_couplings'))
2281 replace_dict['irreg'] = 'CDR'
2282 replace_dict['aspow'] = QCD
2283 replace_dict['aepow'] = QED
2284 replace_dict['modelfile'] = './param_card.dat'
2285 replace_dict['params'] = 'alpha_s'
2286 proc_lines=[]
2287 for proc in process_list:
2288 proc_lines.append('%s -> %s' % \
2289 (' '.join(str(l['id']) for l in proc['legs'] if not l['state']),
2290 ' '.join(str(l['id']) for l in proc['legs'] if l['state'])))
2291 replace_dict['pdgs'] = '\n'.join(proc_lines)
2292 replace_dict['symfin'] = 'Yes'
2293 content = \
2294 "#OLE_order written by MadGraph5_aMC@NLO\n\
2295 \n\
2296 MatrixElementSquareType %(mesq)s\n\
2297 CorrectionType %(corr)s\n\
2298 IRregularisation %(irreg)s\n\
2299 AlphasPower %(aspow)d\n\
2300 AlphaPower %(aepow)d\n\
2301 NJetSymmetrizeFinal %(symfin)s\n\
2302 ModelFile %(modelfile)s\n\
2303 Parameters %(params)s\n\
2304 \n\
2305 # process\n\
2306 %(pdgs)s\n\
2307 " % replace_dict
2308
2309 file = open(filename, 'w')
2310 file.write(content)
2311 file.close
2312 return
2313
2314
2315
2316
2317
2318
2320 """Export a matrix element to a born.f file in MadFKS format"""
2321
2322 matrix_element = fksborn.born_me
2323
2324 if not matrix_element.get('processes') or \
2325 not matrix_element.get('diagrams'):
2326 return 0
2327
2328 if not isinstance(writer, writers.FortranWriter):
2329 raise writers.FortranWriter.FortranWriterError(\
2330 "writer not FortranWriter")
2331
2332 writers.FortranWriter.downcase = False
2333
2334 replace_dict = {}
2335
2336
2337 info_lines = self.get_mg5_info_lines()
2338 replace_dict['info_lines'] = info_lines
2339
2340
2341 process_lines = self.get_process_info_lines(matrix_element)
2342 replace_dict['process_lines'] = process_lines
2343
2344
2345
2346 ncomb = matrix_element.get_helicity_combinations()
2347 replace_dict['ncomb'] = ncomb
2348
2349
2350 helicity_lines = self.get_helicity_lines(matrix_element)
2351 replace_dict['helicity_lines'] = helicity_lines
2352
2353
2354 ic_line = self.get_ic_line(matrix_element)
2355 replace_dict['ic_line'] = ic_line
2356
2357
2358
2359
2360
2361
2362 ngraphs = matrix_element.get_number_of_amplitudes()
2363 replace_dict['ngraphs'] = ngraphs
2364
2365
2366 nwavefuncs = matrix_element.get_number_of_wavefunctions()
2367 replace_dict['nwavefuncs'] = nwavefuncs
2368
2369
2370 ncolor = max(1, len(matrix_element.get('color_basis')))
2371 replace_dict['ncolor'] = ncolor
2372
2373
2374 color_data_lines = self.get_color_data_lines(matrix_element)
2375 replace_dict['color_data_lines'] = "\n".join(color_data_lines)
2376
2377
2378 helas_calls = fortran_model.get_matrix_element_calls(\
2379 matrix_element)
2380 replace_dict['helas_calls'] = "\n".join(helas_calls)
2381
2382
2383 amp2_lines = self.get_amp2_lines(matrix_element)
2384 replace_dict['amp2_lines'] = '\n'.join(amp2_lines)
2385
2386
2387 jamp_lines = self.get_JAMP_lines(matrix_element)
2388 replace_dict['jamp_lines'] = '\n'.join(jamp_lines)
2389
2390
2391 if not self.model or any([p.get('spin') in [4,5] for p in self.model.get('particles') if p]):
2392 replace_dict['wavefunctionsize'] = 20
2393 else:
2394 replace_dict['wavefunctionsize'] = 8
2395
2396
2397 ij_lines = self.get_ij_lines(fksborn)
2398 replace_dict['ij_lines'] = '\n'.join(ij_lines)
2399
2400
2401 den_factor_lines = self.get_den_factor_lines(fksborn)
2402 replace_dict['den_factor_lines'] = '\n'.join(den_factor_lines)
2403
2404
2405 replace_dict['nconfs'] = max(len(fksborn.get_fks_info_list()),1)
2406
2407 file = open(os.path.join(_file_path, \
2408 'iolibs/template_files/born_fks.inc')).read()
2409 file = file % replace_dict
2410
2411
2412 writer.writelines(file)
2413 logger.warning('This function should not be called')
2414
2415 return len(filter(lambda call: call.find('#') != 0, helas_calls)), ncolor
2416
2417
2419 """Export a matrix element to a born_hel.f file in MadFKS format"""
2420
2421 matrix_element = fksborn.born_me
2422
2423 if not matrix_element.get('processes') or \
2424 not matrix_element.get('diagrams'):
2425 return 0
2426
2427 if not isinstance(writer, writers.FortranWriter):
2428 raise writers.FortranWriter.FortranWriterError(\
2429 "writer not FortranWriter")
2430
2431 writers.FortranWriter.downcase = False
2432
2433 replace_dict = {}
2434
2435
2436 info_lines = self.get_mg5_info_lines()
2437 replace_dict['info_lines'] = info_lines
2438
2439
2440 process_lines = self.get_process_info_lines(matrix_element)
2441 replace_dict['process_lines'] = process_lines
2442
2443
2444
2445 ncomb = matrix_element.get_helicity_combinations()
2446 replace_dict['ncomb'] = ncomb
2447
2448
2449 helicity_lines = self.get_helicity_lines(matrix_element)
2450 replace_dict['helicity_lines'] = helicity_lines
2451
2452
2453 ic_line = self.get_ic_line(matrix_element)
2454 replace_dict['ic_line'] = ic_line
2455
2456
2457
2458
2459
2460
2461 ngraphs = matrix_element.get_number_of_amplitudes()
2462 replace_dict['ngraphs'] = ngraphs
2463
2464
2465 nwavefuncs = matrix_element.get_number_of_wavefunctions()
2466 replace_dict['nwavefuncs'] = nwavefuncs
2467
2468
2469 ncolor = max(1, len(matrix_element.get('color_basis')))
2470 replace_dict['ncolor'] = ncolor
2471
2472
2473 color_data_lines = self.get_color_data_lines(matrix_element)
2474 replace_dict['color_data_lines'] = "\n".join(color_data_lines)
2475
2476
2477 amp2_lines = self.get_amp2_lines(matrix_element)
2478 replace_dict['amp2_lines'] = '\n'.join(amp2_lines)
2479
2480
2481 jamp_lines = self.get_JAMP_lines(matrix_element)
2482 replace_dict['jamp_lines'] = '\n'.join(jamp_lines)
2483
2484
2485 den_factor_lines = self.get_den_factor_lines(fksborn)
2486 replace_dict['den_factor_lines'] = '\n'.join(den_factor_lines)
2487
2488
2489 replace_dict['nconfs'] = len(fksborn.get_fks_info_list())
2490
2491 file = open(os.path.join(_file_path, \
2492 'iolibs/template_files/born_fks_hel.inc')).read()
2493 file = file % replace_dict
2494
2495
2496 writer.writelines(file)
2497
2498 return
2499
2500
2501
2502
2503
2504
2506 """Creates the sborn_sf.f file, containing the calls to the different
2507 color linked borns"""
2508
2509 replace_dict = {}
2510 color_links = me.color_links
2511 nlinks = len(color_links)
2512
2513 replace_dict['nsqorders'] = nsqorders
2514 replace_dict['iflines_col'] = ''
2515
2516 for i, c_link in enumerate(color_links):
2517 ilink = i+1
2518 iff = {True : 'if', False : 'elseif'}[i==0]
2519 m, n = c_link['link']
2520 if m != n:
2521 replace_dict['iflines_col'] += \
2522 "c link partons %(m)d and %(n)d \n\
2523 %(iff)s ((m.eq.%(m)d .and. n.eq.%(n)d).or.(m.eq.%(n)d .and. n.eq.%(m)d)) then \n\
2524 call sb_sf_%(ilink)3.3d(p_born,wgt_col)\n" \
2525 % {'m':m, 'n': n, 'iff': iff, 'ilink': ilink}
2526 else:
2527 replace_dict['iflines_col'] += \
2528 "c link partons %(m)d and %(n)d \n\
2529 %(iff)s (m.eq.%(m)d .and. n.eq.%(n)d) then \n\
2530 call sb_sf_%(ilink)3.3d(p_born,wgt_col)\n" \
2531 % {'m':m, 'n': n, 'iff': iff, 'ilink': ilink}
2532
2533
2534 if replace_dict['iflines_col']:
2535 replace_dict['iflines_col'] += 'endif\n'
2536 else:
2537
2538 replace_dict['iflines_col'] += 'write(*,*) \'Error in sborn_sf, no color links\'\nstop\n'
2539
2540 file = open(os.path.join(_file_path, \
2541 'iolibs/template_files/sborn_sf_fks.inc')).read()
2542 file = file % replace_dict
2543 writer.writelines(file)
2544
2545
2547 """return the product of charges (as a string) of particles m and n.
2548 Special sign conventions may be needed for initial/final state particles
2549 """
2550 return charge_list[n - 1] * charge_list[m - 1]
2551
2552
2553
2554
2555
2556
2558 """Create the b_sf_xxx.f file for the ilink-th soft linked born
2559 """
2560
2561 matrix_element = copy.copy(fksborn.born_me)
2562
2563 if not matrix_element.get('processes') or \
2564 not matrix_element.get('diagrams'):
2565 return 0
2566
2567 if not isinstance(writer, writers.FortranWriter):
2568 raise writers.FortranWriter.FortranWriterError(\
2569 "writer not FortranWriter")
2570
2571 writers.FortranWriter.downcase = False
2572
2573 link = fksborn.color_links[ilink]
2574
2575 replace_dict = {}
2576
2577 replace_dict['ilink'] = ilink + 1
2578
2579
2580 info_lines = self.get_mg5_info_lines()
2581 replace_dict['info_lines'] = info_lines
2582
2583
2584 process_lines = self.get_process_info_lines(matrix_element)
2585 replace_dict['process_lines'] = process_lines + \
2586 "\nc spectators: %d %d \n" % tuple(link['link'])
2587
2588
2589 ncomb = matrix_element.get_helicity_combinations()
2590 replace_dict['ncomb'] = ncomb
2591
2592
2593 helicity_lines = self.get_helicity_lines(matrix_element)
2594 replace_dict['helicity_lines'] = helicity_lines
2595
2596
2597 ic_line = self.get_ic_line(matrix_element)
2598 replace_dict['ic_line'] = ic_line
2599
2600
2601 den_factor_lines = self.get_den_factor_lines(fksborn)
2602 replace_dict['den_factor_lines'] = '\n'.join(den_factor_lines)
2603
2604
2605 ngraphs = matrix_element.get_number_of_amplitudes()
2606 replace_dict['ngraphs'] = ngraphs
2607
2608
2609 nwavefuncs = matrix_element.get_number_of_wavefunctions()
2610 replace_dict['nwavefuncs'] = nwavefuncs
2611
2612
2613 ncolor1 = max(1, len(link['orig_basis']))
2614 replace_dict['ncolor1'] = ncolor1
2615 ncolor2 = max(1, len(link['link_basis']))
2616 replace_dict['ncolor2'] = ncolor2
2617
2618
2619 color_data_lines = self.get_color_data_lines_from_color_matrix(\
2620 link['link_matrix'])
2621 replace_dict['color_data_lines'] = "\n".join(color_data_lines)
2622
2623
2624 amp2_lines = self.get_amp2_lines(matrix_element)
2625 replace_dict['amp2_lines'] = '\n'.join(amp2_lines)
2626
2627
2628
2629
2630 split_orders=matrix_element.get('processes')[0].get('split_orders')
2631 if len(split_orders)==0:
2632 replace_dict['nSplitOrders']=''
2633
2634 jamp_lines = self.get_JAMP_lines(matrix_element)
2635 else:
2636 squared_orders, amp_orders = matrix_element.get_split_orders_mapping()
2637 replace_dict['nAmpSplitOrders']=len(amp_orders)
2638 replace_dict['nSqAmpSplitOrders']=len(squared_orders)
2639 replace_dict['nSplitOrders']=len(split_orders)
2640 amp_so = self.get_split_orders_lines(
2641 [amp_order[0] for amp_order in amp_orders],'AMPSPLITORDERS')
2642 sqamp_so = self.get_split_orders_lines(squared_orders,'SQSPLITORDERS')
2643 replace_dict['ampsplitorders']='\n'.join(amp_so)
2644 replace_dict['sqsplitorders']='\n'.join(sqamp_so)
2645 jamp_lines = self.get_JAMP_lines_split_order(\
2646 matrix_element,amp_orders,split_order_names=split_orders)
2647
2648 replace_dict['jamp1_lines'] = '\n'.join(jamp_lines).replace('JAMP', 'JAMP1')
2649
2650 matrix_element.set('color_basis', link['link_basis'] )
2651 if len(split_orders)==0:
2652 replace_dict['nSplitOrders']=''
2653
2654 jamp_lines = self.get_JAMP_lines(matrix_element)
2655 else:
2656 jamp_lines = self.get_JAMP_lines_split_order(\
2657 matrix_element,amp_orders,split_order_names=split_orders)
2658
2659 replace_dict['jamp2_lines'] = '\n'.join(jamp_lines).replace('JAMP','JAMP2')
2660
2661
2662
2663 replace_dict['nconfs'] = len(fksborn.get_fks_info_list())
2664
2665 file = open(os.path.join(_file_path, \
2666 'iolibs/template_files/b_sf_xxx_splitorders_fks.inc')).read()
2667 file = file % replace_dict
2668
2669
2670 writer.writelines(file)
2671
2672 return 0 , ncolor1
2673
2674
2675
2676
2677
2679 """Write the born_nhel.inc file for MG4. Write the maximum as they are
2680 typically used for setting array limits."""
2681
2682 ncomb = me.get_helicity_combinations()
2683 file = "integer max_bhel, max_bcol \n"
2684 file += "parameter (max_bhel=%d)\nparameter(max_bcol=%d)" % \
2685 (ncomb, nflows)
2686
2687
2688 writer.writelines(file)
2689
2690 return True
2691
2692
2693
2694
2696 """Writes the content of nFKSconfigs.inc, which just gives the
2697 total FKS dirs as a parameter.
2698 nFKSconfigs is always >=1 (use a fake configuration for LOonly)"""
2699 replace_dict = {}
2700 replace_dict['nconfs'] = max(len(fksborn.get_fks_info_list()), 1)
2701 content = \
2702 """ INTEGER FKS_CONFIGS
2703 PARAMETER (FKS_CONFIGS=%(nconfs)d)
2704
2705 """ % replace_dict
2706
2707 writer.writelines(content)
2708
2709
2710
2711
2712
2714 """Writes the content of fks_info.inc, which lists the informations on the
2715 possible splittings of the born ME.
2716 nconfs is always >=1 (use a fake configuration for LOonly).
2717 The fake configuration use an 'antigluon' (id -21, color=8) as i_fks and
2718 the last colored particle as j_fks."""
2719
2720 replace_dict = {}
2721 fks_info_list = fksborn.get_fks_info_list()
2722 split_orders = fksborn.born_me['processes'][0]['split_orders']
2723 replace_dict['nconfs'] = max(len(fks_info_list), 1)
2724 replace_dict['nsplitorders'] = len(split_orders)
2725 replace_dict['splitorders_name'] = ', '.join(split_orders)
2726
2727 bool_dict = {True: '.true.', False: '.false.'}
2728 split_types_return = set(sum([info['fks_info']['splitting_type'] for info in fks_info_list], []))
2729
2730
2731 if len(fks_info_list) > 0:
2732 replace_dict['fks_i_values'] = ', '.join(['%d' % info['fks_info']['i'] \
2733 for info in fks_info_list])
2734 replace_dict['fks_j_values'] = ', '.join(['%d' % info['fks_info']['j'] \
2735 for info in fks_info_list])
2736 replace_dict['extra_cnt_values'] = ', '.join(['%d' % (info['fks_info']['extra_cnt_index'] + 1) \
2737 for info in fks_info_list])
2738
2739 isplitorder_born = []
2740 isplitorder_cnt = []
2741 for info in fks_info_list:
2742
2743 if info['fks_info']['extra_cnt_index'] == -1:
2744 isplitorder_born.append(0)
2745 isplitorder_cnt.append(0)
2746 else:
2747
2748
2749 isplitorder_born.append(split_orders.index(
2750 info['fks_info']['splitting_type'][0]) + 1)
2751 isplitorder_cnt.append(split_orders.index(
2752 info['fks_info']['splitting_type'][1]) + 1)
2753
2754 replace_dict['isplitorder_born_values'] = \
2755 ', '.join(['%d' % n for n in isplitorder_born])
2756 replace_dict['isplitorder_cnt_values'] = \
2757 ', '.join(['%d' % n for n in isplitorder_cnt])
2758
2759 replace_dict['need_color_links'] = ', '.join(\
2760 [bool_dict[info['fks_info']['need_color_links']] for \
2761 info in fks_info_list ])
2762 replace_dict['need_charge_links'] = ', '.join(\
2763 [bool_dict[info['fks_info']['need_charge_links']] for \
2764 info in fks_info_list ])
2765
2766 col_lines = []
2767 pdg_lines = []
2768 charge_lines = []
2769 fks_j_from_i_lines = []
2770 split_type_lines = []
2771 for i, info in enumerate(fks_info_list):
2772 col_lines.append( \
2773 'DATA (PARTICLE_TYPE_D(%d, IPOS), IPOS=1, NEXTERNAL) / %s /' \
2774 % (i + 1, ', '.join('%d' % col for col in fksborn.real_processes[info['n_me']-1].colors) ))
2775 pdg_lines.append( \
2776 'DATA (PDG_TYPE_D(%d, IPOS), IPOS=1, NEXTERNAL) / %s /' \
2777 % (i + 1, ', '.join('%d' % pdg for pdg in info['pdgs'])))
2778 charge_lines.append(\
2779 'DATA (PARTICLE_CHARGE_D(%d, IPOS), IPOS=1, NEXTERNAL) / %s /'\
2780 % (i + 1, ', '.join('%19.15fd0' % charg\
2781 for charg in fksborn.real_processes[info['n_me']-1].charges) ))
2782 fks_j_from_i_lines.extend(self.get_fks_j_from_i_lines(fksborn.real_processes[info['n_me']-1],\
2783 i + 1))
2784 split_type_lines.append( \
2785 'DATA (SPLIT_TYPE_D (%d, IPOS), IPOS=1, %d) / %s /' %
2786 (i + 1, len(split_orders),
2787 ', '.join([bool_dict[ordd in info['fks_info']['splitting_type']] for ordd in split_orders])))
2788 else:
2789
2790
2791
2792 bornproc = fksborn.born_me.get('processes')[0]
2793 pdgs = [l.get('id') for l in bornproc.get('legs')] + [-21]
2794 colors = [l.get('color') for l in bornproc.get('legs')] + [8]
2795 charges = [l.get('charge') for l in bornproc.get('legs')] + [0.]
2796
2797 fks_i = len(colors)
2798
2799 fks_j=0
2800 for cpos, col in enumerate(colors[:-1]):
2801 if col != 1:
2802 fks_j = cpos+1
2803
2804 if fks_j == 0:
2805 for cpos, chg in enumerate(charges[:-1]):
2806 if chg != 0.:
2807 fks_j = cpos+1
2808
2809 if fks_j==0: fks_j=len(colors)-1
2810
2811
2812
2813 if len(colors) == 4:
2814 fks_j = 2
2815
2816 replace_dict['fks_i_values'] = str(fks_i)
2817 replace_dict['fks_j_values'] = str(fks_j)
2818 replace_dict['extra_cnt_values'] = '0'
2819 replace_dict['isplitorder_born_values'] = '0'
2820 replace_dict['isplitorder_cnt_values'] = '0'
2821
2822 replace_dict['need_color_links'] = '.true.'
2823 replace_dict['need_charge_links'] = '.true.'
2824
2825 col_lines = ['DATA (PARTICLE_TYPE_D(1, IPOS), IPOS=1, NEXTERNAL) / %s /' \
2826 % ', '.join([str(col) for col in colors])]
2827 pdg_lines = ['DATA (PDG_TYPE_D(1, IPOS), IPOS=1, NEXTERNAL) / %s /' \
2828 % ', '.join([str(pdg) for pdg in pdgs])]
2829 charge_lines = ['DATA (PARTICLE_CHARGE_D(1, IPOS), IPOS=1, NEXTERNAL) / %s /' \
2830 % ', '.join('%19.15fd0' % charg for charg in charges)]
2831 fks_j_from_i_lines = ['DATA (FKS_J_FROM_I_D(1, %d, JPOS), JPOS = 0, 1) / 1, %d /' \
2832 % (fks_i, fks_j)]
2833 split_type_lines = [ \
2834 'DATA (SPLIT_TYPE_D (%d, IPOS), IPOS=1, %d) / %s /' %
2835 (1, len(split_orders),
2836 ', '.join([bool_dict[False]] * len(split_orders)))]
2837
2838
2839 replace_dict['col_lines'] = '\n'.join(col_lines)
2840 replace_dict['pdg_lines'] = '\n'.join(pdg_lines)
2841 replace_dict['charge_lines'] = '\n'.join(charge_lines)
2842 replace_dict['fks_j_from_i_lines'] = '\n'.join(fks_j_from_i_lines)
2843 replace_dict['split_type_lines'] = '\n'.join(split_type_lines)
2844
2845 content = open(os.path.join(_file_path, \
2846 'iolibs/template_files/fks_info.inc')).read() % replace_dict
2847
2848 if not isinstance(writer, writers.FortranWriter):
2849 raise writers.FortranWriter.FortranWriterError(\
2850 "writer not FortranWriter")
2851
2852 writers.FortranWriter.downcase = False
2853
2854 writer.writelines(content)
2855
2856 return split_types_return
2857
2858
2859
2860
2861
2863
2864 """Write the auto_dsig.f file for MadFKS, which contains
2865 pdf call information"""
2866
2867 if not matrix_element.get('processes') or \
2868 not matrix_element.get('diagrams'):
2869 return 0
2870
2871 nexternal, ninitial = matrix_element.get_nexternal_ninitial()
2872
2873 if ninitial < 1 or ninitial > 2:
2874 raise writers.FortranWriter.FortranWriterError, \
2875 """Need ninitial = 1 or 2 to write auto_dsig file"""
2876
2877 replace_dict = {}
2878
2879 replace_dict['N_me'] = n
2880
2881
2882 info_lines = self.get_mg5_info_lines()
2883 replace_dict['info_lines'] = info_lines
2884
2885
2886 process_lines = self.get_process_info_lines(matrix_element)
2887 replace_dict['process_lines'] = process_lines
2888
2889 pdf_vars, pdf_data, pdf_lines = \
2890 self.get_pdf_lines_mir(matrix_element, ninitial, False, False)
2891 replace_dict['pdf_vars'] = pdf_vars
2892 replace_dict['pdf_data'] = pdf_data
2893 replace_dict['pdf_lines'] = pdf_lines
2894
2895 file = open(os.path.join(_file_path, \
2896 'iolibs/template_files/parton_lum_n_fks.inc')).read()
2897 file = file % replace_dict
2898
2899
2900 writer.writelines(file)
2901
2902
2903
2904
2905
2906
2908 """Write the coloramps.inc file for MadEvent"""
2909
2910 lines = []
2911 lines.append( "logical icolamp(%d,%d,1)" % \
2912 (max([len(me.get('color_basis').keys()), 1]),
2913 len(mapconfigs)))
2914
2915 lines += self.get_icolamp_lines(mapconfigs, me, 1)
2916
2917
2918 writer.writelines(lines)
2919
2920 return True
2921
2922
2923
2924
2925
2927 """Write the leshouche.inc file for MG4"""
2928
2929
2930 (nexternal, ninitial) = me.get_nexternal_ninitial()
2931
2932 lines = []
2933
2934 for iproc, proc in enumerate(me.get('processes')):
2935 legs = proc.get_legs_with_decays()
2936 lines.append("DATA (IDUP(i,%d),i=1,%d)/%s/" % \
2937 (iproc + 1, nexternal,
2938 ",".join([str(l.get('id')) for l in legs])))
2939 for i in [1, 2]:
2940 lines.append("DATA (MOTHUP(%d,i,%3r),i=1,%2r)/%s/" % \
2941 (i, iproc + 1, nexternal,
2942 ",".join([ "%3r" % 0 ] * ninitial + \
2943 [ "%3r" % i ] * (nexternal - ninitial))))
2944
2945
2946
2947 if iproc == 0:
2948
2949 if not me.get('color_basis'):
2950 for i in [1, 2]:
2951 lines.append("DATA (ICOLUP(%d,i, 1),i=1,%2r)/%s/" % \
2952 (i, nexternal,
2953 ",".join([ "%3r" % 0 ] * nexternal)))
2954 color_flow_list = []
2955
2956 else:
2957
2958 repr_dict = {}
2959 for l in legs:
2960 repr_dict[l.get('number')] = \
2961 proc.get('model').get_particle(l.get('id')).get_color()\
2962 * (-1)**(1+l.get('state'))
2963
2964 color_flow_list = \
2965 me.get('color_basis').color_flow_decomposition(repr_dict, ninitial)
2966
2967 for cf_i, color_flow_dict in enumerate(color_flow_list):
2968 for i in [0, 1]:
2969 lines.append("DATA (ICOLUP(%d,i,%3r),i=1,%2r)/%s/" % \
2970 (i + 1, cf_i + 1, nexternal,
2971 ",".join(["%3r" % color_flow_dict[l.get('number')][i] \
2972 for l in legs])))
2973
2974
2975 writer.writelines(lines)
2976
2977 return len(color_flow_list)
2978
2979
2980
2981
2982
2984 """Write the configs.inc file for the list of born matrix-elements"""
2985
2986
2987 (nexternal, ninitial) = me.get_nexternal_ninitial()
2988 model = me.get('processes')[0].get('model')
2989 lines = ['', 'C Here are the congifurations']
2990 lines_P = ['', 'C Here are the propagators']
2991 lines_BW = ['', 'C Here are the BWs']
2992
2993 iconfig = 0
2994
2995 iconfig_list = []
2996 mapconfigs_list = []
2997 s_and_t_channels_list = []
2998 nschannels = []
2999
3000 particle_dict = me.get('processes')[0].get('model').\
3001 get('particle_dict')
3002
3003 booldict = {True: '.false.', False: '.false'}
3004
3005 max_leg_number = 0
3006
3007
3008 s_and_t_channels = []
3009 mapconfigs = []
3010 lines.extend(['C %s' % proc.nice_string() for proc in me.get('processes')])
3011 base_diagrams = me.get('base_amplitude').get('diagrams')
3012 minvert = min([max([len(vert.get('legs')) for vert in \
3013 diag.get('vertices')]) for diag in base_diagrams])
3014
3015 for idiag, diag in enumerate(base_diagrams):
3016 if any([len(vert.get('legs')) > minvert for vert in
3017 diag.get('vertices')]):
3018
3019 continue
3020 iconfig = iconfig + 1
3021 helas_diag = me.get('diagrams')[idiag]
3022 mapconfigs.append(helas_diag.get('number'))
3023 lines.append("# Diagram %d, Amplitude %d" % \
3024 (helas_diag.get('number'),helas_diag.get('amplitudes')[0]['number']))
3025
3026 lines.append("data mapconfig(%4d)/%4d/" % (iconfig,
3027 helas_diag.get('amplitudes')[0]['number']))
3028
3029
3030
3031 schannels, tchannels = helas_diag.get('amplitudes')[0].\
3032 get_s_and_t_channels(ninitial, model, 990)
3033
3034 s_and_t_channels.append([schannels, tchannels])
3035
3036
3037 allchannels = schannels
3038 if len(tchannels) > 1:
3039
3040 allchannels = schannels + tchannels
3041
3042 for vert in allchannels:
3043 daughters = [leg.get('number') for leg in vert.get('legs')[:-1]]
3044 last_leg = vert.get('legs')[-1]
3045 lines.append("data (iforest(ifr,%3d,%4d),ifr=1,%d)/%s/" % \
3046 (last_leg.get('number'), iconfig, len(daughters),
3047 ",".join(["%3d" % d for d in daughters])))
3048 if vert in schannels:
3049 lines.append("data sprop(%4d,%4d)/%8d/" % \
3050 (last_leg.get('number'), iconfig,
3051 last_leg.get('id')))
3052 elif vert in tchannels[:-1]:
3053 lines.append("data tprid(%4d,%4d)/%8d/" % \
3054 (last_leg.get('number'), iconfig,
3055 abs(last_leg.get('id'))))
3056
3057 max_leg_number = min(max_leg_number,last_leg.get('number'))
3058
3059
3060 lines.append("# Number of configs")
3061 lines.append("data mapconfig(0)/%4d/" % (iconfig))
3062
3063
3064 for iconf, config in enumerate(s_and_t_channels):
3065 schannels = config[0]
3066 nschannels.append(len(schannels))
3067 for vertex in schannels:
3068
3069
3070 leg = vertex.get('legs')[-1]
3071 lines_BW.append("data gForceBW(%d,%d)/%s/" % \
3072 (leg.get('number'), iconf + 1,
3073 booldict[leg.get('from_group')]))
3074
3075
3076 firstlines = []
3077 firstlines.append('integer ifr')
3078 firstlines.append('integer lmaxconfigsb_used\nparameter (lmaxconfigsb_used=%d)' % iconfig)
3079 firstlines.append('integer max_branchb_used\nparameter (max_branchb_used=%d)' % -max_leg_number)
3080 firstlines.append('integer mapconfig(0 : lmaxconfigsb_used)')
3081 firstlines.append('integer iforest(2, -max_branchb_used:-1, lmaxconfigsb_used)')
3082 firstlines.append('integer sprop(-max_branchb_used:-1, lmaxconfigsb_used)')
3083 firstlines.append('integer tprid(-max_branchb_used:-1, lmaxconfigsb_used)')
3084 firstlines.append('logical gforceBW(-max_branchb_used : -1, lmaxconfigsb_used)')
3085
3086
3087 writer.writelines(firstlines + lines + lines_BW)
3088
3089 return iconfig, mapconfigs, s_and_t_channels
3090
3091
3092
3093
3094
3096 """Write the configs.inc file for the list of born matrix-elements"""
3097
3098
3099 lines_P = ['', 'C Here are the propagators']
3100
3101 particle_dict = me.get('processes')[0].get('model').\
3102 get('particle_dict')
3103
3104 for iconf, configs in enumerate(s_and_t_channels):
3105 for vertex in configs[0] + configs[1][:-1]:
3106 leg = vertex.get('legs')[-1]
3107 if leg.get('id') == 21 and 21 not in particle_dict:
3108
3109 mass = 'zero'
3110 width = 'zero'
3111 pow_part = 0
3112 else:
3113 particle = particle_dict[leg.get('id')]
3114
3115 if particle.get('mass').lower() == 'zero':
3116 mass = particle.get('mass')
3117 else:
3118 mass = "abs(%s)" % particle.get('mass')
3119
3120 if particle.get('width').lower() == 'zero':
3121 width = particle.get('width')
3122 else:
3123 width = "abs(%s)" % particle.get('width')
3124
3125 pow_part = 1 + int(particle.is_boson())
3126
3127 lines_P.append("pmass(%3d,%4d) = %s" % \
3128 (leg.get('number'), iconf + 1, mass))
3129 lines_P.append("pwidth(%3d,%4d) = %s" % \
3130 (leg.get('number'), iconf + 1, width))
3131 lines_P.append("pow(%3d,%4d) = %d" % \
3132 (leg.get('number'), iconf + 1, pow_part))
3133
3134
3135 writer.writelines(lines_P)
3136
3137
3138
3139
3140
3141
3142
        """Write the dname.mg file for MG4"""
        # The directory name is "P" followed by the shell string of the
        # first process of this matrix element.
        line = "DIRNAME=P%s" % \
               matrix_element.get('processes')[0].shell_string()

        # Write the file
        writer.write(line + "\n")

        return True
3153
3154
3155
3156
3157
        """Write the iproc.dat file for MG4"""
        # me_number is 0-based; iproc.dat stores the 1-based process number.
        line = "%d" % (me_number + 1)

        # write_line handles the Fortran-style formatting/wrapping of the line.
        for line_to_write in writer.write_line(line):
            writer.write(line_to_write)
        return True
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
        """generate the lines for fks.inc describing initializating the
        fks_j_from_i array"""
        # NOTE(review): the `def` line of this helper was elided from this
        # chunk; `me` and `i` are presumably parameters -- confirm.
        lines = []
        if not me.isfinite:
            for ii, js in me.fks_j_from_i.items():
                if js:
                    # DATA statement: entry 0 is the count of j's, then the list.
                    lines.append('DATA (FKS_J_FROM_I_D(%d, %d, JPOS), JPOS = 0, %d) / %d, %s /' \
                                 % (i, ii, len(js), len(js), ', '.join(["%d" % j for j in js])))
        else:
            # NOTE(review): indentation of this `else` was lost in extraction;
            # presumably it pairs with `if not me.isfinite` (finite-contribution
            # case gets a single dummy entry) -- confirm against the full file.
            lines.append('DATA (FKS_J_FROM_I_D(%d, JPOS), JPOS = 0, %d) / %d, %s /' \
                         % (2, 1, 1, '1'))
        lines.append('')

        return lines
3193
3194
3195
3196
3197
3199
        """Write the leshouche.inc file for MG4"""

        # Extract number of external particles
        (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()

        lines = []
        for iproc, proc in enumerate(matrix_element.get('processes')):
            legs = proc.get_legs_with_decays()
            # "I" line (IDUP): PDG ids of all legs of this subprocess.
            lines.append("I %4d %4d %s" % \
                         (ime, iproc + 1,
                          " ".join([str(l.get('id')) for l in legs])))
            # "M" lines (MOTHUP): initial legs have no mother (0); every
            # final-state leg points to mother i (first then second beam).
            for i in [1, 2]:
                lines.append("M %4d %4d %4d %s" % \
                             (ime, i, iproc + 1,
                              " ".join([ "%3d" % 0 ] * ninitial + \
                                       [ "%3d" % i ] * (nexternal - ninitial))))

            # Color flows are identical for all subprocesses: write them
            # only once, for the first process.
            if iproc == 0:
                if not matrix_element.get('color_basis'):
                    # No color basis: write dummy (all-zero) color lines.
                    for i in [1, 2]:
                        lines.append("C %4d %4d 1 %s" % \
                                     (ime, i,
                                      " ".join([ "%3d" % 0 ] * nexternal)))
                    color_flow_list = []
                    nflow = 1
                else:
                    # Map leg number -> color representation, with the sign
                    # flipped for initial-state legs.
                    repr_dict = {}
                    for l in legs:
                        repr_dict[l.get('number')] = \
                            proc.get('model').get_particle(l.get('id')).get_color()\
                            * (-1)**(1+l.get('state'))

                    # Decompose the color basis into explicit color flows.
                    color_flow_list = \
                        matrix_element.get('color_basis').color_flow_decomposition(repr_dict,
                                                                                   ninitial)

                    # "C" lines (ICOLUP): one (color, anticolor) pair per flow.
                    for cf_i, color_flow_dict in enumerate(color_flow_list):
                        for i in [0, 1]:
                            lines.append("C %4d %4d %4d %s" % \
                                         (ime, i + 1, cf_i + 1,
                                          " ".join(["%3d" % color_flow_dict[l.get('number')][i] \
                                                    for l in legs])))

                    nflow = len(color_flow_list)

        nproc = len(matrix_element.get('processes'))

        return lines, nproc, nflow
3253
3254
3256
        """As get_leshouche_lines, but for 'fake' real emission processes (LOonly
        In this case, write born color structure times ij -> i,j splitting)
        """
        bornproc = matrix_element.get('processes')[0]
        colors = [l.get('color') for l in bornproc.get('legs')]

        # The extra (fake) parton is appended after all born legs.
        fks_i = len(colors)

        # fks_j: position (1-based) of the last colored born leg; defaults to 1
        # when all legs are color singlets.
        fks_j=1
        for cpos, col in enumerate(colors):
            if col != 1:
                fks_j = cpos+1

        (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
        # Account for the extra emitted parton.
        nexternal+=1

        lines = []
        for iproc, proc in enumerate(matrix_element.get('processes')):
            # Append a fake final-state color-octet leg to the born legs.
            # NOTE(review): id is -21, presumably a marker for the fake gluon
            # rather than a physical PDG code -- confirm.
            legs = proc.get_legs_with_decays() + \
                   [fks_common.FKSLeg({'id': -21,
                                       'number': nexternal,
                                       'state': True,
                                       'fks': 'i',
                                       'color': 8,
                                       'charge': 0.,
                                       'massless': True,
                                       'spin': 3,
                                       'is_part': True,
                                       'self_antipart': True})]

            # "I" line (IDUP): PDG ids including the fake parton.
            lines.append("I %4d %4d %s" % \
                         (ime, iproc + 1,
                          " ".join([str(l.get('id')) for l in legs])))
            # "M" lines (MOTHUP): zeros for initial legs, beam index otherwise.
            for i in [1, 2]:
                lines.append("M %4d %4d %4d %s" % \
                             (ime, i, iproc + 1,
                              " ".join([ "%3d" % 0 ] * ninitial + \
                                       [ "%3d" % i ] * (nexternal - ninitial))))

            # Color flows are written only once, for the first process.
            if iproc == 0:
                if not matrix_element.get('color_basis'):
                    # No color basis: dummy (all-zero) color lines.
                    for i in [1, 2]:
                        lines.append("C %4d %4d 1 %s" % \
                                     (ime, i,
                                      " ".join([ "%3d" % 0 ] * nexternal)))
                    color_flow_list = []
                    nflow = 1
                else:
                    # Decompose the BORN color basis; the fake parton
                    # (last leg) is excluded here and grafted in below.
                    repr_dict = {}
                    for l in legs[:-1]:
                        repr_dict[l.get('number')] = \
                            proc.get('model').get_particle(l.get('id')).get_color()\
                            * (-1)**(1+l.get('state'))

                    color_flow_list = \
                        matrix_element.get('color_basis').color_flow_decomposition(repr_dict,
                                                                                   ninitial)

                    for cf_i, color_flow_dict in enumerate(color_flow_list):
                        # Highest color-line index already used in this flow.
                        maxicol = max(sum(color_flow_dict.values(), []))

                        # Does fks_j carry only an anticolor index?
                        if color_flow_dict[fks_j][0] == 0:
                            anti = True
                            icol_j = color_flow_dict[fks_j][1]
                        else:
                            anti = False
                            icol_j = color_flow_dict[fks_j][0]

                        # Stretch a new color line (maxicol+1) between the
                        # fake parton and fks_j, on the appropriate side.
                        if anti:
                            color_flow_dict[nexternal] = (maxicol + 1, color_flow_dict[fks_j][1])
                            color_flow_dict[fks_j][1] = maxicol + 1
                        else:
                            color_flow_dict[nexternal] = (color_flow_dict[fks_j][0], maxicol + 1)
                            color_flow_dict[fks_j][0] = maxicol + 1

                        # "C" lines (ICOLUP) for this flow, fake parton included.
                        for i in [0, 1]:
                            lines.append("C %4d %4d %4d %s" % \
                                         (ime, i + 1, cf_i + 1,
                                          " ".join(["%3d" % color_flow_dict[l.get('number')][i] \
                                                    for l in legs])))

                    nflow = len(color_flow_list)

        nproc = len(matrix_element.get('processes'))

        return lines, nproc, nflow
3356
3357
3358
3359
3360
        """returns the lines with the information on the denominator keeping care
        of the identical particle factors in the various real emissions
        If born_me is provided, it is used instead of fks_born.born_me"""

        # When an external born_me is passed, its identical-particle factor has
        # to be compensated for relative to fks_born's own born matrix element.
        compensate = True
        if not born_me:
            born_me = fks_born.born_me
            compensate = False

        lines = []
        info_list = fks_born.get_fks_info_list()
        if info_list:
            # One denominator entry per FKS configuration.
            lines.append('INTEGER IDEN_VALUES(%d)' % len(info_list))
            if not compensate:
                lines.append('DATA IDEN_VALUES /' + \
                             ', '.join(['%d' % (
                                 born_me.get_denominator_factor()) \
                                 for info in info_list]) + '/')
            else:
                # Rescale by the ratio of identical-particle factors between
                # the passed-in born ME and fks_born's own born ME.
                lines.append('DATA IDEN_VALUES /' + \
                             ', '.join(['%d' % (
                                 born_me.get_denominator_factor() / \
                                 born_me['identical_particle_factor'] * \
                                 fks_born.born_me['identical_particle_factor']) \
                                 for info in info_list]) + '/')
        else:
            # No FKS configurations (e.g. LOonly): single entry from the born.
            lines.append('INTEGER IDEN_VALUES(1)')
            lines.append('DATA IDEN_VALUES / %d /' \
                         % fks_born.born_me.get_denominator_factor())

        return lines
3395
3396
3397
3398
3399
        """returns the lines with the information on the particle number of the born
        that splits"""
        info_list = fks_born.get_fks_info_list()
        lines = []
        if info_list:
            # Keep ij only when the splitting born leg is massless; a massive
            # splitter is stored as 0.
            ij_list = [info['fks_info']['ij'] if \
                       fks_born.born_me['processes'][0]['legs'][info['fks_info']['ij']-1]['massless'] \
                       else 0 for info in info_list]
            lines.append('INTEGER IJ_VALUES(%d)' % len(info_list))
            lines.append('DATA IJ_VALUES /' + ', '.join(['%d' % ij for ij in ij_list]) + '/')
        else:
            # No FKS configurations: write a single dummy entry.
            lines.append('INTEGER IJ_VALUES(1)')
            lines.append('DATA IJ_VALUES / 0 /')

        return lines
3419
3420
    def get_pdf_lines_mir(self, matrix_element, ninitial, subproc_group = False,\
                          mirror = False):
        """Generate the PDF lines for the auto_dsig.f file.

        Returns a 3-tuple of Fortran source fragments (each with its trailing
        newline stripped): variable declarations, DATA initializations and the
        PDF-evaluation code. `mirror` swaps the beam assignment; with
        `subproc_group` the beam index goes through the IB() mapping."""

        processes = matrix_element.get('processes')
        model = processes[0].get('model')

        pdf_definition_lines = ""
        pdf_data_lines = ""
        pdf_lines = ""

        if ninitial == 1:
            # Decay process: no PDFs, every subprocess gets weight 1.
            pdf_lines = "PD(0) = 0d0\nIPROC = 0\n"
            for i, proc in enumerate(processes):
                process_line = proc.base_string()
                pdf_lines = pdf_lines + "IPROC=IPROC+1 ! " + process_line
                pdf_lines = pdf_lines + "\nPD(IPROC) = 1d0\n"
                pdf_lines = pdf_lines + "\nPD(0)=PD(0)+PD(IPROC)\n"
        else:
            # Unique, sorted initial-state PDG ids for each of the two beams.
            initial_states = [sorted(list(set([p.get_initial_pdg(1) for \
                                               p in processes]))),
                              sorted(list(set([p.get_initial_pdg(2) for \
                                               p in processes])))]

            # PDG id -> Fortran-safe particle-name variable stem
            # ('~' -> 'x', '+' -> 'p', '-' -> 'm').
            pdf_codes = dict([(p, model.get_particle(p).get_name()) for p in \
                              sum(initial_states,[])])
            for key,val in pdf_codes.items():
                pdf_codes[key] = val.replace('~','x').replace('+','p').replace('-','m')

            # PDG id -> code understood by PDG2PDF (gluon is 0, photon 7,
            # charged leptons 8-10).
            pdgtopdf = {21: 0, 22: 7, -11: -8, 11: 8, -13: -9, 13: 9, -15: -10, 15: 10}

            for pdg in sum(initial_states,[]):
                if not pdg in pdgtopdf and not pdg in pdgtopdf.values():
                    pdgtopdf[pdg] = pdg
                elif pdg not in pdgtopdf and pdg in pdgtopdf.values():
                    # pdg collides with an already-assigned pdf code:
                    # offset it to keep codes unique.
                    pdgtopdf[pdg] = 6000000 + pdg

            # DOUBLE PRECISION declarations, one variable per (particle, beam).
            for i in [0,1]:
                pdf_definition_lines += "DOUBLE PRECISION " + \
                                        ",".join(["%s%d" % (pdf_codes[pdg],i+1) \
                                                  for pdg in \
                                                  initial_states[i]]) + \
                                        "\n"

            # DATA lines initializing every PDF variable to 1D0.
            for i in [0,1]:
                pdf_data_lines += "DATA " + \
                                  ",".join(["%s%d" % (pdf_codes[pdg],i+1) \
                                            for pdg in initial_states[i]]) + \
                                  "/%d*1D0/" % len(initial_states[i]) + \
                                  "\n"

            # PDF calls, guarded by the beam type (LPP); `mirror` swaps
            # which physical beam feeds which set of variables.
            for i, init_states in enumerate(initial_states):
                if not mirror:
                    ibeam = i + 1
                else:
                    ibeam = 2 - i
                if subproc_group:
                    pdf_lines = pdf_lines + \
                                "IF (ABS(LPP(IB(%d))).GE.1) THEN\nLP=SIGN(1,LPP(IB(%d)))\n" \
                                % (ibeam, ibeam)
                else:
                    pdf_lines = pdf_lines + \
                                "IF (ABS(LPP(%d)) .GE. 1) THEN\nLP=SIGN(1,LPP(%d))\n" \
                                % (ibeam, ibeam)

                for initial_state in init_states:
                    if initial_state in pdf_codes.keys():
                        if subproc_group:
                            if abs(pdgtopdf[initial_state]) <= 10:
                                # Quarks, gluon, photon, leptons: real PDF call.
                                pdf_lines = pdf_lines + \
                                            ("%s%d=PDG2PDF(ABS(LPP(IB(%d))),%d*LP," + \
                                             "XBK(IB(%d)),DSQRT(Q2FACT(%d)))\n") % \
                                            (pdf_codes[initial_state],
                                             i + 1, ibeam, pdgtopdf[initial_state],
                                             ibeam, ibeam)
                            else:
                                # Anything else gets a vanishing PDF.
                                pdf_lines = pdf_lines + \
                                            ("c settings other partons flavours outside quark, gluon, photon to 0d0\n" + \
                                             "%s%d=0d0\n") % \
                                            (pdf_codes[initial_state],i + 1)
                        else:
                            if abs(pdgtopdf[initial_state]) <= 10:
                                pdf_lines = pdf_lines + \
                                            ("%s%d=PDG2PDF(ABS(LPP(%d)),%d*LP," + \
                                             "XBK(%d),DSQRT(Q2FACT(%d)))\n") % \
                                            (pdf_codes[initial_state],
                                             i + 1, ibeam, pdgtopdf[initial_state],
                                             ibeam, ibeam)
                            else:
                                pdf_lines = pdf_lines + \
                                            ("c settings other partons flavours outside quark, gluon, photon to 0d0\n" + \
                                             "%s%d=0d0\n") % \
                                            (pdf_codes[initial_state],i + 1)

                pdf_lines = pdf_lines + "ENDIF\n"

            # PD(IPROC): product of the two beam PDF factors per subprocess
            # (1d0 when a beam particle has no PDF variable).
            pdf_lines = pdf_lines + "PD(0) = 0d0\nIPROC = 0\n"
            for proc in processes:
                process_line = proc.base_string()
                pdf_lines = pdf_lines + "IPROC=IPROC+1 ! " + process_line
                pdf_lines = pdf_lines + "\nPD(IPROC) = "
                for ibeam in [1, 2]:
                    initial_state = proc.get_initial_pdg(ibeam)
                    if initial_state in pdf_codes.keys():
                        pdf_lines = pdf_lines + "%s%d*" % \
                                    (pdf_codes[initial_state], ibeam)
                    else:
                        pdf_lines = pdf_lines + "1d0*"

                # Drop the trailing '*' of the product.
                pdf_lines = pdf_lines[:-1] + "\n"

        # Strip each fragment's trailing newline.
        return pdf_definition_lines[:-1], pdf_data_lines[:-1], pdf_lines[:-1]
3544
3545
3546
        """Return the color matrix definition lines for the given color_matrix. Split
        rows in chunks of size n."""

        if not color_matrix:
            # No color matrix: trivial 1x1 identity data.
            return ["DATA Denom(1)/1/", "DATA (CF(i,1),i=1,1) /1/"]
        else:
            ret_list = []
            my_cs = color.ColorString()
            # NOTE(review): my_cs appears unused in this visible span.
            for index, denominator in \
                enumerate(color_matrix.get_line_denominators()):
                # Common denominator of this row of the color matrix.
                ret_list.append("DATA Denom(%i)/%i/" % (index + 1, denominator))
                # Numerators, split into chunks of n per DATA statement so the
                # Fortran line-length limit is respected.
                num_list = color_matrix.get_line_numerators(index, denominator)
                for k in xrange(0, len(num_list), n):
                    ret_list.append("DATA (CF(i,%3r),i=%3r,%3r) /%s/" % \
                                    (index + 1, k + 1, min(k + n, len(num_list)),
                                     ','.join(["%5r" % i for i in num_list[k:k + n]])))

            return ret_list
3568
3569
3570
3571
        """Write the maxamps.inc file for MG4."""

        # NOTE: `file` shadows the Python-2 builtin; kept as in the original.
        file = " integer maxamps, maxflow, maxproc, maxsproc\n"
        file = file + "parameter (maxamps=%d, maxflow=%d)\n" % \
               (maxamps, maxflows)
        file = file + "parameter (maxproc=%d, maxsproc=%d)" % \
               (maxproc, maxsproc)

        # Write the file
        writer.writelines(file)

        return True
3586
3587
3588
3589
3591
        """Write the ncombs.inc file for MadEvent."""

        # Extract number of external particles
        (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()

        # n_max_cl is set to 2**(nexternal+1).
        file = " integer n_max_cl\n"
        file = file + "parameter (n_max_cl=%d)" % (2 ** (nexternal+1))

        # Write the file
        writer.writelines(file)

        return True
3605
3606
3607
3608
        """Write a dummy config_subproc.inc file for MadEvent"""

        lines = []
        # One CONFSUB entry per configuration, all mapped to subprocess 1.
        for iconfig in range(len(s_and_t_channels)):
            lines.append("DATA CONFSUB(1,%d)/1/" % \
                         (iconfig + 1))

        # Write the file
        writer.writelines(lines)

        return True
3622
3623
3624
3625
        """Write the get_color.f file for MadEvent, which returns color
        for all particles used in the matrix element."""

        # Use the first real-emission matrix element if one exists,
        # otherwise fall back to the born matrix element.
        try:
            matrix_elements=matrix_element.real_processes[0].matrix_element
        except IndexError:
            matrix_elements=[matrix_element.born_me]

        if isinstance(matrix_elements, helas_objects.HelasMatrixElement):
            matrix_elements = [matrix_elements]

        model = matrix_elements[0].get('processes')[0].get('model')

        # Collect the pdg codes (particle and antiparticle) of all
        # wavefunctions appearing in the real-emission diagrams...
        wf_ids = set(sum([sum([sum([sum([[wf.get_pdg_code(),wf.get_anti_pdg_code()] \
                                         for wf in d.get('wavefunctions')],[]) \
                                    for d in me.get('diagrams')],[]) \
                               for me in [real_proc.matrix_element]],[])\
                          for real_proc in matrix_element.real_processes],[]))

        # ...and in the born diagrams.
        wf_ids = wf_ids.union(set(sum([sum([[wf.get_pdg_code(),wf.get_anti_pdg_code()] \
                                            for wf in d.get('wavefunctions')],[]) \
                                       for d in matrix_element.born_me.get('diagrams')],[])))

        # Same for the external legs (including decay products), real...
        leg_ids = set(sum([sum([sum([[l.get('id') for l in \
                                      p.get_legs_with_decays()] for p in \
                                     me.get('processes')], []) for me in \
                                [real_proc.matrix_element]], []) for real_proc in \
                           matrix_element.real_processes],[]))

        # ...and born.
        leg_ids = leg_ids.union(set(sum([[l.get('id') for l in \
                                          p.get_legs_with_decays()] for p in \
                                         matrix_element.born_me.get('processes')], [])))
        particle_ids = sorted(list(wf_ids.union(leg_ids)))

        # Build the Fortran get_color function: one branch per pdg id.
        lines = """function get_color(ipdg)
        implicit none
        integer get_color, ipdg

        if(ipdg.eq.%d)then
        get_color=%d
        return
        """ % (particle_ids[0], model.get_particle(particle_ids[0]).get_color())

        for part_id in particle_ids[1:]:
            lines += """else if(ipdg.eq.%d)then
            get_color=%d
            return
            """ % (part_id, model.get_particle(part_id).get_color())

        # Branch for the dummy particle used in multiparticle vertices.
        lines += """else if(ipdg.eq.%d)then
c     This is dummy particle used in multiparticle vertices
            get_color=2
            return
            """ % model.get_first_non_pdg()
        lines += """else
        write(*,*)'Error: No color given for pdg ',ipdg
        get_color=0
        return
        endif
        end
        """

        # Write the file
        writer.writelines(lines)

        return True
3698
3699
3700
3701
3702
3703 - def write_props_file(self, writer, matrix_element, fortran_model, s_and_t_channels):
3704 """Write the props.inc file for MadEvent. Needs input from
3705 write_configs_file. With respect to the parent routine, it has some
3706 more specific formats that allow the props.inc file to be read by the
3707 link program"""
3708
3709 lines = []
3710
3711 particle_dict = matrix_element.get('processes')[0].get('model').\
3712 get('particle_dict')
3713
3714 for iconf, configs in enumerate(s_and_t_channels):
3715 for vertex in configs[0] + configs[1][:-1]:
3716 leg = vertex.get('legs')[-1]
3717 if leg.get('id') not in particle_dict:
3718
3719 mass = 'zero'
3720 width = 'zero'
3721 pow_part = 0
3722 else:
3723 particle = particle_dict[leg.get('id')]
3724
3725 if particle.get('mass').lower() == 'zero':
3726 mass = particle.get('mass')
3727 else:
3728 mass = "abs(%s)" % particle.get('mass')
3729
3730 if particle.get('width').lower() == 'zero':
3731 width = particle.get('width')
3732 else:
3733 width = "abs(%s)" % particle.get('width')
3734
3735 pow_part = 1 + int(particle.is_boson())
3736
3737 lines.append("pmass(%3d,%4d) = %s" % \
3738 (leg.get('number'), iconf + 1, mass))
3739 lines.append("pwidth(%3d,%4d) = %s" % \
3740 (leg.get('number'), iconf + 1, width))
3741 lines.append("pow(%3d,%4d) = %d" % \
3742 (leg.get('number'), iconf + 1, pow_part))
3743
3744
3745 writer.writelines(lines)
3746
3747 return True
3748
3749
3750
3751
3752
        """Append this subprocess to the subproc.mg file for MG4"""

        # One subprocess directory name per line.
        writer.write(subprocdir + "\n")

        return True
3760
3761
3762
3763
3764
3765
3768 """Class to take care of exporting a set of matrix elements to
3769 Fortran (v4) format."""
3770
3771
3774
3775
3776
3777
3778
        """create the directory run_name as a copy of the MadEvent
        Template, and clean the directory
        For now it is just the same as copy_v4template, but it will be modified
        """
        mgme_dir = self.mgme_dir
        dir_path = self.dir_path
        clean =self.opt['clean']

        # Create a fresh process directory from the NLO template when it
        # does not exist yet.
        if not os.path.isdir(dir_path):
            if not mgme_dir:
                raise MadGraph5Error, \
                      "No valid MG_ME path given for MG4 run directory creation."
            logger.info('initialize a new directory: %s' % \
                        os.path.basename(dir_path))
            shutil.copytree(os.path.join(mgme_dir, 'Template', 'NLO'), dir_path, True)

            # Overlay the files shared by all templates.
            dir_util.copy_tree(pjoin(self.mgme_dir, 'Template', 'Common'),
                               dir_path)

            # Keep a pristine copy of the cards that may be edited by the user.
            for card in ['plot_card']:
                if os.path.isfile(pjoin(self.dir_path, 'Cards', card + '.dat')):
                    try:
                        shutil.copy(pjoin(self.dir_path, 'Cards', card + '.dat'),
                                    pjoin(self.dir_path, 'Cards', card + '_default.dat'))
                    except IOError:
                        logger.warning("Failed to copy " + card + ".dat to default")

        elif not os.path.isfile(os.path.join(dir_path, 'TemplateVersion.txt')):
            if not mgme_dir:
                raise MadGraph5Error, \
                      "No valid MG_ME path given for MG4 run directory creation."
            try:
                shutil.copy(os.path.join(mgme_dir, 'MGMEVersion.txt'), dir_path)
            except IOError:
                # No MGMEVersion.txt available: record the MG5 version instead.
                MG5_version = misc.get_pkg_info()
                open(os.path.join(dir_path, 'MGMEVersion.txt'), 'w').write( \
                    "5." + MG5_version['version'])

        # Optionally clean leftovers of a previous generation.
        if clean:
            logger.info('remove old information in %s' % os.path.basename(dir_path))
            if os.environ.has_key('MADGRAPH_BASE'):
                subprocess.call([os.path.join('bin', 'internal', 'clean_template'),
                                 '--web'], cwd=dir_path)
            else:
                try:
                    subprocess.call([os.path.join('bin', 'internal', 'clean_template')], \
                                    cwd=dir_path)
                except Exception, why:
                    raise MadGraph5Error('Failed to clean correctly %s: \n %s' \
                                         % (os.path.basename(dir_path), why))

        # Record the MG/ME version used for this output.
        MG_version = misc.get_pkg_info()
        open(os.path.join(dir_path, 'SubProcesses', 'MGVersion.txt'), 'w').write(
            MG_version['version'])

        # Link the CutTools reduction library into the output directory.
        self.link_CutTools(dir_path)

        # Link every available tensor-integral-reduction (TIR) library and
        # collect the corresponding linker flags / include paths.
        link_tir_libs=[]
        tir_libs=[]
        tir_include=[]
        for tir in self.all_tir:
            tir_dir="%s_dir"%tir
            libpath=getattr(self,tir_dir)
            libpath = self.link_TIR(os.path.join(self.dir_path, 'lib'),
                                    libpath,"lib%s.a"%tir,tir_name=tir)
            setattr(self,tir_dir,libpath)
            if libpath != "":
                if tir in ['pjfry','ninja','golem', 'samurai','collier']:
                    # Externally installed library: link against its own path.
                    link_tir_libs.append('-L%s/ -l%s'%(libpath,tir))
                    tir_libs.append('%s/lib%s.$(libext)'%(libpath,tir))

                    # Ninja additionally needs the OneLOop library next to it.
                    if tir in ['ninja']:
                        if not any(os.path.isfile(pjoin(libpath,'libavh_olo.%s'%ext))
                                   for ext in ['a','dylib','so']):
                            raise MadGraph5Error(
                                "The OneLOop library 'libavh_olo.(a|dylib|so)' could no be found in path '%s'. Please place a symlink to it there."%libpath)
                        link_tir_libs.append('-L%s/ -l%s'%(libpath,'avh_olo'))
                        tir_libs.append('%s/lib%s.$(libext)'%(libpath,'avh_olo'))

                    # These TIRs ship Fortran modules: locate their include dir.
                    if tir in ['golem','samurai','ninja','collier']:
                        trg_path = pjoin(os.path.dirname(libpath),'include')
                        if os.path.isdir(trg_path):
                            to_include = misc.find_includes_path(trg_path,
                                                                 self.include_names[tir])
                        else:
                            to_include = None

                        # Collier sometimes keeps its modules in 'modules'.
                        if to_include is None and tir=='collier':
                            to_include = misc.find_includes_path(
                                pjoin(libpath,'modules'),self.include_names[tir])
                        if to_include is None:
                            logger.error(
                                'Could not find the include directory for %s, looking in %s.\n' % (tir ,str(trg_path))+
                                'Generation carries on but you will need to edit the include path by hand in the makefiles.')
                            to_include = '<Not_found_define_it_yourself>'
                        tir_include.append('-I %s'%to_include)
                else:
                    # Internally built library: link from the local lib dir.
                    link_tir_libs.append('-l%s'%tir)
                    tir_libs.append('$(LIBDIR)lib%s.$(libext)'%tir)

        # Regenerate makefile_loop with the collected TIR settings.
        os.remove(os.path.join(self.dir_path,'SubProcesses','makefile_loop.inc'))
        cwd = os.getcwd()
        dirpath = os.path.join(self.dir_path, 'SubProcesses')
        try:
            os.chdir(dirpath)
        except os.error:
            logger.error('Could not cd to directory %s' % dirpath)
            return 0
        filename = 'makefile_loop'
        calls = self.write_makefile_TIR(writers.MakefileWriter(filename),
                                        link_tir_libs,tir_libs,tir_include=tir_include)
        # Same for Source/make_opts.
        os.remove(os.path.join(self.dir_path,'Source','make_opts.inc'))
        dirpath = os.path.join(self.dir_path, 'Source')
        try:
            os.chdir(dirpath)
        except os.error:
            logger.error('Could not cd to directory %s' % dirpath)
            return 0
        filename = 'make_opts'
        calls = self.write_make_opts(writers.MakefileWriter(filename),
                                     link_tir_libs,tir_libs)

        os.chdir(cwd)

        cwd = os.getcwd()
        dirpath = os.path.join(self.dir_path, 'SubProcesses')
        try:
            os.chdir(dirpath)
        except os.error:
            logger.error('Could not cd to directory %s' % dirpath)
            return 0

        # Copy the MadLoop parameter machinery from the StandAlone template.
        cpfiles= ["SubProcesses/MadLoopParamReader.f",
                  "Cards/MadLoopParams.dat",
                  "SubProcesses/MadLoopParams.inc"]

        for file in cpfiles:
            shutil.copy(os.path.join(self.loop_dir,'StandAlone/', file),
                        os.path.join(self.dir_path, file))

        shutil.copy(pjoin(self.dir_path, 'Cards','MadLoopParams.dat'),
                    pjoin(self.dir_path, 'Cards','MadLoopParams_default.dat'))

        if os.path.exists(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat')):
            self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.dir_path,
                                                'Cards', 'MadLoopParams.dat'))
            # Write out the MadLoop option card.
            self.MadLoopparam.write(pjoin(self.dir_path,"SubProcesses",
                                          "MadLoopParams.dat"))

        # Generate MadLoopCommons.f from the template, with the banner and
        # the availability of Collier substituted in.
        MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone',
                             "SubProcesses","MadLoopCommons.inc")).read()
        writer = writers.FortranWriter(os.path.join(self.dir_path,
                                       "SubProcesses","MadLoopCommons.f"))
        writer.writelines(MadLoopCommon%{
            'print_banner_commands':self.MadLoop_banner},
            context={'collier_available':self.tir_available_dict['collier']})
        writer.close()

        # Link the multiple-precision coupling includes, if the model has them.
        model_path = self.dir_path + '/Source/MODEL/'
        if os.path.isfile(os.path.join(model_path,'mp_coupl.inc')):
            ln(model_path + '/mp_coupl.inc', self.dir_path + '/SubProcesses')
        if os.path.isfile(os.path.join(model_path,'mp_coupl_same_name.inc')):
            ln(model_path + '/mp_coupl_same_name.inc', \
               self.dir_path + '/SubProcesses')

        # Write the CutTools multiple-precision headers.
        self.write_mp_files(writers.FortranWriter('cts_mprec.h'),\
                            writers.FortranWriter('cts_mpc.h'),)

        # Copy the python helper scripts into bin/internal.
        self.copy_python_files()

        # Write the PDF opendata helper.
        self.write_pdf_opendata()

        os.chdir(cwd)
3970
        """writes the V**** directory inside the P**** directories specified in
        dir_name"""

        cwd = os.getcwd()

        matrix_element = loop_matrix_element

        # Shared MadLoop resources directory for this P-directory.
        dirpath = os.path.join(dir_name, 'MadLoop5_resources')
        try:
            os.mkdir(dirpath)
        except os.error as error:
            # Directory may already exist: only warn.
            logger.warning(error.strerror + " " + dirpath)

        # The V-directory name is derived from the first process.
        name = "V%s" % matrix_element.get('processes')[0].shell_string()
        dirpath = os.path.join(dir_name, name)

        try:
            os.mkdir(dirpath)
        except os.error as error:
            logger.warning(error.strerror + " " + dirpath)

        try:
            os.chdir(dirpath)
        except os.error:
            logger.error('Could not cd to directory %s' % dirpath)
            return 0

        logger.info('Creating files in directory %s' % name)

        # Extract number of external particles
        (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()

        calls=self.write_loop_matrix_element_v4(None,matrix_element,fortran_model)

        # Link the coefficient-size specification from DHELAS.
        ln(pjoin(self.dir_path, 'Source', 'DHELAS', 'coef_specs.inc'),
                 abspath=False, cwd=None)

        filename = 'born_matrix.f'
        calls = self.write_bornmatrix(
            writers.FortranWriter(filename),
            matrix_element,
            fortran_model)

        filename = 'nexternal.inc'
        self.write_nexternal_file(writers.FortranWriter(filename),
                                  nexternal, ninitial)

        filename = 'pmass.inc'
        self.write_pmass_file(writers.FortranWriter(filename),
                              matrix_element)

        filename = 'ngraphs.inc'
        self.write_ngraphs_file(writers.FortranWriter(filename),
                                len(matrix_element.get_all_amplitudes()))

        # Loop-diagram drawing: only a placeholder file plus the first 1000
        # loop diagrams drawn from the base amplitude.
        filename = "loop_matrix.ps"
        writers.FortranWriter(filename).writelines("""C Post-helas generation loop-drawing is not ready yet.""")
        plot = draw.MultiEpsDiagramDrawer(base_objects.DiagramList(
            matrix_element.get('base_amplitude').get('loop_diagrams')[:1000]),
            filename,
            model=matrix_element.get('processes')[0].get('model'),
            amplitude='')
        logger.info("Drawing loop Feynman diagrams for " + \
                    matrix_element.get('processes')[0].nice_string(\
                        print_weighted=False))
        plot.draw()

        filename = "born_matrix.ps"
        plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\
                                          get('born_diagrams'),
                                          filename,
                                          model=matrix_element.get('processes')[0].\
                                          get('model'),
                                          amplitude='')
        logger.info("Generating born Feynman diagrams for " + \
                    matrix_element.get('processes')[0].nice_string(\
                        print_weighted=False))
        plot.draw()

        self.write_global_specs(matrix_element, output_path=pjoin(dirpath,'global_specs.inc'))

        open('unique_id.inc','w').write(
"""      integer UNIQUE_ID
      parameter(UNIQUE_ID=1)""")

        # Link common includes and MadLoop sources from the P-directory.
        linkfiles = ['coupl.inc', 'mp_coupl.inc', 'mp_coupl_same_name.inc',
                     'cts_mprec.h', 'cts_mpc.h', 'MadLoopParamReader.f',
                     'MadLoopParams.inc','MadLoopCommons.f']

        for file in linkfiles:
            ln('../../%s' % file)

        os.system("ln -s ../../makefile_loop makefile")

        # MadLoopParams.dat is shared via the MadLoop5_resources directory.
        ln(pjoin(os.path.pardir,os.path.pardir,'MadLoopParams.dat'),
           pjoin('..','MadLoop5_resources'))

        linkfiles = ['mpmodule.mod']

        for file in linkfiles:
            ln('../../../lib/%s' % file)

        linkfiles = ['coef_specs.inc']

        for file in linkfiles:
            ln('../../../Source/DHELAS/%s' % file)

        os.chdir(cwd)

        if not calls:
            calls = 0
        return calls
4093
4094
4095
4096
4097
        """ writes the coef_specs.inc in the DHELAS folder. Should not be called in the
        non-optimized mode"""
        filename = os.path.join(self.dir_path, 'Source', 'DHELAS', 'coef_specs.inc')

        replace_dict = {}
        # Maximum size of a loop wavefunction.
        replace_dict['max_lwf_size'] = 4
        # Largest number of polynomial coefficients needed for the highest
        # loop-vertex rank appearing in any matrix element.
        replace_dict['vertex_max_coefs'] = max(\
            [q_polynomial.get_number_of_coefs_for_rank(n)
             for n in max_loop_vertex_ranks])
        IncWriter=writers.FortranWriter(filename,'w')
        IncWriter.writelines("""INTEGER MAXLWFSIZE
                           PARAMETER (MAXLWFSIZE=%(max_lwf_size)d)
                           INTEGER VERTEXMAXCOEFS
                           PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\
                           % replace_dict)
        IncWriter.close()
4115