15 """Several different checks for processes (and hence models):
16 permutation tests, gauge invariance tests, lorentz invariance
17 tests. Also class for evaluation of Python matrix elements,
18 MatrixElementEvaluator."""
19
20 from __future__ import division
21
22 import array
23 import copy
24 import fractions
25 import itertools
26 import logging
27 import math
28 import os
29 import sys
30 import re
31 import shutil
32 import random
33 import glob
35 import subprocess
36 import time
37 import datetime
38 import errno
39 import pickle
40
41
42
43 import aloha
44 import aloha.aloha_writers as aloha_writers
45 import aloha.create_aloha as create_aloha
46
47 import madgraph.iolibs.export_python as export_python
48 import madgraph.iolibs.helas_call_writers as helas_call_writers
49 import models.import_ufo as import_ufo
50 import madgraph.iolibs.save_load_object as save_load_object
51 import madgraph.iolibs.file_writers as writers
52
53 import madgraph.core.base_objects as base_objects
54 import madgraph.core.color_algebra as color
55 import madgraph.core.color_amp as color_amp
56 import madgraph.core.helas_objects as helas_objects
57 import madgraph.core.diagram_generation as diagram_generation
58
59 import madgraph.various.rambo as rambo
60 import madgraph.various.misc as misc
61 import madgraph.various.progressbar as pbar
62 import madgraph.various.banner as bannermod
64
65 import madgraph.loop.loop_diagram_generation as loop_diagram_generation
66 import madgraph.loop.loop_helas_objects as loop_helas_objects
67 import madgraph.loop.loop_base_objects as loop_base_objects
68 import models.check_param_card as check_param_card
69
70 from madgraph.interface.madevent_interface import MadLoopInitializer
71 from madgraph.interface.common_run_interface import AskforEditCard
72 from madgraph import MG5DIR, InvalidCmd, MadGraph5Error
73
74 from madgraph.iolibs.files import cp
75
76 import StringIO
77 import models.model_reader as model_reader
78 import aloha.template_files.wavefunctions as wavefunctions
79 from aloha.template_files.wavefunctions import \
80 ixxxxx, oxxxxx, vxxxxx, sxxxxx, txxxxx, irxxxx, orxxxx
81
82 ADDED_GLOBAL = []
83
84 temp_dir_prefix = "TMP_CHECK"
85
86 pjoin = os.path.join
88 def clean_added_globals(to_clean):
89 for value in list(to_clean):
90 del globals()[value]
91 to_clean.remove(value)
92
97 """ Just an 'option container' to mimick the interface which is passed to the
98 tests. We put in only what is now used from interface by the test:
99 cmd.options['fortran_compiler']
100 cmd.options['complex_mass_scheme']
101 cmd._mgme_dir"""
102 def __init__(self, mgme_dir = "", complex_mass_scheme = False,
103 fortran_compiler = 'gfortran' ):
104 self._mgme_dir = mgme_dir
105 self.options = {}
106 self.options['complex_mass_scheme']=complex_mass_scheme
107 self.options['fortran_compiler']=fortran_compiler
108
109
110
111
112
113 logger = logging.getLogger('madgraph.various.process_checks')
118 """boost the set momenta in the 'boost direction' by the 'beta'
119 factor"""
120
121 boost_p = []
122 gamma = 1/ math.sqrt(1 - beta**2)
123 for imp in p:
124 boost_comp = imp[boost_direction]
125 E, px, py, pz = imp
126 boost_imp = []
127
128 boost_imp.append(gamma * E - gamma * beta * boost_comp)
129
130 if boost_direction == 1:
131 boost_imp.append(-gamma * beta * E + gamma * px)
132 else:
133 boost_imp.append(px)
134
135 if boost_direction == 2:
136 boost_imp.append(-gamma * beta * E + gamma * py)
137 else:
138 boost_imp.append(py)
139
140 if boost_direction == 3:
141 boost_imp.append(-gamma * beta * E + gamma * pz)
142 else:
143 boost_imp.append(pz)
144
145 boost_p.append(boost_imp)
146
147 return boost_p
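# Worked example (illustrative, values made up): boosting a single massless
# momentum along the z axis with beta = 0.6 (so gamma = 1.25):
#   p_in  = [[100.0, 0.0, 0.0, 100.0]]
#   p_out = boost_momenta(p_in, 3, 0.6)
#   # p_out == [[50.0, 0.0, 0.0, 50.0]], i.e. E'  = gamma*(E  - beta*pz)
#   #                                    and  pz' = gamma*(pz - beta*E)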
148
153 """Class taking care of matrix element evaluation, storing
154 relevant quantities for speedup."""
155
156 def __init__(self, model, param_card = None,
157 auth_skipping = False, reuse = True, cmd = FakeInterface()):
158 """Initialize object with stored_quantities, helas_writer,
159 model, etc.
160 auth_skipping = True means that any identical matrix element will be
161 evaluated only once
162 reuse = True means that the matrix element corresponding to a
163 given process can be reused (turn off if you are using
164 different models for the same process)"""
165
166 self.cmd = cmd
167
168
169 self.helas_writer = helas_call_writers.PythonUFOHelasCallWriter(model)
170
171
172 self.full_model = model_reader.ModelReader(model)
173 try:
174 self.full_model.set_parameters_and_couplings(param_card)
175 except MadGraph5Error:
176 if isinstance(param_card, (str,file)):
177 raise
178 logger.warning('The param_card present in the event file is not compatible.'+
179 ' We will use the default one.')
180 self.full_model.set_parameters_and_couplings()
181
182 self.auth_skipping = auth_skipping
183 self.reuse = reuse
184 self.cmass_scheme = cmd.options['complex_mass_scheme']
185 self.store_aloha = []
186 self.stored_quantities = {}
187
188
189
190
191 def evaluate_matrix_element(self, matrix_element, p=None, full_model=None,
192 gauge_check=False, auth_skipping=None, output='m2',
193 options=None):
194 """Calculate the matrix element and evaluate it for a phase space point
195 output is either m2, amp, jamp
196 """
197
198 if full_model:
199 self.full_model = full_model
200 process = matrix_element.get('processes')[0]
201 model = process.get('model')
202
203 if "matrix_elements" not in self.stored_quantities:
204 self.stored_quantities['matrix_elements'] = []
205 matrix_methods = {}
206
207 if self.reuse and "Matrix_%s" % process.shell_string() in globals() and p:
208
209 matrix = eval("Matrix_%s()" % process.shell_string())
210 me_value = matrix.smatrix(p, self.full_model)
211 if output == "m2":
212 return matrix.smatrix(p, self.full_model), matrix.amp2
213 else:
214 m2 = matrix.smatrix(p, self.full_model)
215 return {'m2': m2, output:getattr(matrix, output)}
216 if (auth_skipping or self.auth_skipping) and matrix_element in \
217 self.stored_quantities['matrix_elements']:
218
219 logger.info("Skipping %s, " % process.nice_string() + \
220 "identical matrix element already tested" \
221 )
222 return None
223
224 self.stored_quantities['matrix_elements'].append(matrix_element)
225
226
227
228
229 if "list_colorize" not in self.stored_quantities:
230 self.stored_quantities["list_colorize"] = []
231 if "list_color_basis" not in self.stored_quantities:
232 self.stored_quantities["list_color_basis"] = []
233 if "list_color_matrices" not in self.stored_quantities:
234 self.stored_quantities["list_color_matrices"] = []
235
236 col_basis = color_amp.ColorBasis()
237 new_amp = matrix_element.get_base_amplitude()
238 matrix_element.set('base_amplitude', new_amp)
239 colorize_obj = col_basis.create_color_dict_list(new_amp)
240
241 try:
242
243
244
245 col_index = self.stored_quantities["list_colorize"].index(colorize_obj)
246 except ValueError:
247
248
249 self.stored_quantities['list_colorize'].append(colorize_obj)
250 col_basis.build()
251 self.stored_quantities['list_color_basis'].append(col_basis)
252 col_matrix = color_amp.ColorMatrix(col_basis)
253 self.stored_quantities['list_color_matrices'].append(col_matrix)
254 col_index = -1
255
256
257 matrix_element.set('color_basis',
258 self.stored_quantities['list_color_basis'][col_index])
259 matrix_element.set('color_matrix',
260 self.stored_quantities['list_color_matrices'][col_index])
261
262
263 if "used_lorentz" not in self.stored_quantities:
264 self.stored_quantities["used_lorentz"] = []
265
266 me_used_lorentz = set(matrix_element.get_used_lorentz())
267 me_used_lorentz = [lorentz for lorentz in me_used_lorentz \
268 if lorentz not in self.store_aloha]
269
270 aloha_model = create_aloha.AbstractALOHAModel(model.get('name'))
271 aloha_model.add_Lorentz_object(model.get('lorentz'))
272 aloha_model.compute_subset(me_used_lorentz)
273
274
275 aloha_routines = []
276 for routine in aloha_model.values():
277 aloha_routines.append(routine.write(output_dir = None,
278 mode='mg5',
279 language = 'Python'))
280 for routine in aloha_model.external_routines:
281 aloha_routines.append(
282 open(aloha_model.locate_external(routine, 'Python')).read())
283
284
285 previous_globals = list(globals().keys())
286 for routine in aloha_routines:
287 exec(routine, globals())
288 for key in globals().keys():
289 if key not in previous_globals:
290 ADDED_GLOBAL.append(key)
291
292
293 self.store_aloha.extend(me_used_lorentz)
294
295 exporter = export_python.ProcessExporterPython(matrix_element,
296 self.helas_writer)
297 try:
298 matrix_methods = exporter.get_python_matrix_methods(\
299 gauge_check=gauge_check)
300
301 except helas_call_writers.HelasWriterError, error:
302 logger.info(error)
303 return None
304
305
306
307 if self.reuse:
308
309 exec(matrix_methods[process.shell_string()], globals())
310 ADDED_GLOBAL.append('Matrix_%s' % process.shell_string())
311 else:
312
313 exec(matrix_methods[process.shell_string()])
314
315 if not p:
316 p, w_rambo = self.get_momenta(process, options)
317
318 exec("data = Matrix_%s()" % process.shell_string())
319 if output == "m2":
320 return data.smatrix(p, self.full_model), data.amp2
321 else:
322 m2 = data.smatrix(p,self.full_model)
323 return {'m2': m2, output:getattr(data, output)}
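# Minimal usage sketch (illustrative only; it mirrors the pattern used by
# evaluate_helicities further below, and assumes 'model' and 'process' have
# already been built elsewhere):
#   evaluator = MatrixElementEvaluator(model, param_card=None,
#                                      auth_skipping=False, reuse=True)
#   amplitude = diagram_generation.Amplitude(process)
#   me = helas_objects.HelasMatrixElement(amplitude, gen_color=False)
#   p, w_rambo = evaluator.get_momenta(process)
#   m2, amp2 = evaluator.evaluate_matrix_element(me, p=p, output='m2')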
324
325 @staticmethod
327 """ Check whether the specified kinematic point passes isolation cuts
328 """
329
330 def Pt(pmom):
331 """ Computes the pt of a 4-momentum"""
332 return math.sqrt(pmom[1]**2+pmom[2]**2)
333
334 def DeltaR(p1,p2):
335 """ Computes the DeltaR between two 4-momenta"""
336
337 p1_vec=math.sqrt(p1[1]**2+p1[2]**2+p1[3]**2)
338 p2_vec=math.sqrt(p2[1]**2+p2[2]**2+p2[3]**2)
339 eta1=0.5*math.log((p1_vec+p1[3])/(p1_vec-p1[3]))
340 eta2=0.5*math.log((p2_vec+p2[3])/(p2_vec-p2[3]))
341
342 phi1=math.atan2(p1[2],p1[1])
343 phi2=math.atan2(p2[2],p2[1])
344 dphi=abs(phi2-phi1)
345
346 dphi=abs(abs(dphi-math.pi)-math.pi)
347
348 return math.sqrt(dphi**2+(eta2-eta1)**2)
349
350 for i, pmom in enumerate(pmoms[2:]):
351
352 if Pt(pmom)<ptcut:
353 return False
354
355 for pmom2 in pmoms[3+i:]:
356 if DeltaR(pmom,pmom2)<drcut:
357 return False
358 return True
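# Illustrative check (numbers made up): the first two entries of 'pmoms' are
# the incoming momenta and are not tested; every final-state momentum must
# pass the pt cut and every final-state pair the DeltaR cut.
#   pmoms = [[500., 0., 0., 500.], [500., 0., 0., -500.],
#            [500., 120., 90., 30.], [500., -120., -90., -30.]]
#   MatrixElementEvaluator.pass_isolation_cuts(pmoms)
#   # -> True for hard, well-separated final-state momenta such as these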
359
360
361
362
363 def get_momenta(self, process, options=None, special_mass=None):
364 """Get a point in phase space for the external states in the given
365 process, with the CM energy given. The incoming particles are
366 assumed to be oriented along the z axis, with particle 1 along the
367 positive z axis.
368 For the CMS check, one must be able to choose the mass of the special
369 resonance particle with id = 0, and the special_mass option allows one
370 to specify it."""
371
372 if not options:
373 energy=1000
374 events=None
375 else:
376 energy = options['energy']
377 events = options['events']
378 to_skip = 0
379
380 if not (isinstance(process, base_objects.Process) and \
381 isinstance(energy, (float,int))):
382 raise rambo.RAMBOError, "Not correct type for arguments to get_momenta"
383
384
385 sorted_legs = sorted(process.get('legs'), lambda l1, l2:\
386 l1.get('number') - l2.get('number'))
387
388
389 if events:
390 ids = [l.get('id') for l in sorted_legs]
391 import MadSpin.decay as madspin
392 if not hasattr(self, 'event_file'):
393 fsock = open(events)
394 self.event_file = madspin.Event(fsock)
395
396 skip = 0
397 while self.event_file.get_next_event() != 'no_event':
398 event = self.event_file.particle
399
400 event_ids = [p['pid'] for p in event.values()]
401 if event_ids == ids:
402 skip += 1
403 if skip > to_skip:
404 break
405 else:
406 raise MadGraph5Error, 'No compatible events for %s' % ids
407 p = []
408 for part in event.values():
409 m = part['momentum']
410 p.append([m.E, m.px, m.py, m.pz])
411 return p, 1
412
413 nincoming = len([leg for leg in sorted_legs if leg.get('state') == False])
414 nfinal = len(sorted_legs) - nincoming
415
416
417 mass = []
418 for l in sorted_legs:
419 if l.get('id') != 0:
420 mass_string = self.full_model.get_particle(l.get('id')).get('mass')
421 mass.append(self.full_model.get('parameter_dict')[mass_string].real)
422 else:
423 if isinstance(special_mass, float):
424 mass.append(special_mass)
425 else:
426 raise Exception, "A 'special_mass' option must be specified"+\
427 " in get_momenta when a leg with id=-10 is present (for CMS check)"
428
429
430
431
432
433
434
435
436
437 energy = max(energy, sum(mass[:nincoming])*1.2,sum(mass[nincoming:])*1.2)
438
439
440
441
442
443
444
445 if nfinal == 1:
446 p = []
447 energy = mass[-1]
448 p.append([energy/2,0,0,energy/2])
449 p.append([energy/2,0,0,-energy/2])
450 p.append([mass[-1],0,0,0])
451 return p, 1.0
452
453 e2 = energy**2
454 m1 = mass[0]
455 p = []
456
457 masses = rambo.FortranList(nfinal)
458 for i in range(nfinal):
459 masses[i+1] = mass[nincoming + i]
460
461 if nincoming == 1:
462
463 p.append([abs(m1), 0., 0., 0.])
464 p_rambo, w_rambo = rambo.RAMBO(nfinal, abs(m1), masses)
465
466 for i in range(1, nfinal+1):
467 momi = [p_rambo[(4,i)], p_rambo[(1,i)],
468 p_rambo[(2,i)], p_rambo[(3,i)]]
469 p.append(momi)
470
471 return p, w_rambo
472
473 if nincoming != 2:
474 raise rambo.RAMBOError('Need 1 or 2 incoming particles')
475
476 if nfinal == 1:
477 energy = masses[1]
478 if masses[1] == 0.0:
479 raise rambo.RAMBOError('The 2 > 1 kinematics with a massless'+\
480 ' final-state particle is invalid')
481
482 e2 = energy**2
483 m2 = mass[1]
484
485 mom = math.sqrt((e2**2 - 2*e2*m1**2 + m1**4 - 2*e2*m2**2 - \
486 2*m1**2*m2**2 + m2**4) / (4*e2))
487 e1 = math.sqrt(mom**2+m1**2)
488 e2 = math.sqrt(mom**2+m2**2)
489
490 p.append([e1, 0., 0., mom])
491 p.append([e2, 0., 0., -mom])
492
493 if nfinal == 1:
494 p.append([energy, 0., 0., 0.])
495 return p, 1.
496
497 p_rambo, w_rambo = rambo.RAMBO(nfinal, energy, masses)
498
499
500 for i in range(1, nfinal+1):
501 momi = [p_rambo[(4,i)], p_rambo[(1,i)],
502 p_rambo[(2,i)], p_rambo[(3,i)]]
503 p.append(momi)
504
505 return p, w_rambo
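# Minimal usage sketch (illustrative): only the 'energy' and 'events' keys of
# the options dictionary are read here; 'evaluator' is a MatrixElementEvaluator
# instance as in the sketch given earlier.
#   opts = {'energy': 1000.0, 'events': None}
#   p, wgt = evaluator.get_momenta(process, options=opts)
#   # p[0], p[1] are the incoming momenta along +z and -z, the remaining
#   # entries are the RAMBO final-state momenta and wgt is the PS weight.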
506
512 """Class taking care of matrix element evaluation for loop processes."""
513
514 def __init__(self,cuttools_dir=None, output_path=None, tir_dir={},
515 cmd=FakeInterface(),*args,**kwargs):
516 """Allow for initializing the MG5 root where the temporary fortran
517 output for checks is placed."""
518
519 super(LoopMatrixElementEvaluator,self).__init__(*args,cmd=cmd,**kwargs)
520
521 self.mg_root=self.cmd._mgme_dir
522
523 if output_path is None:
524 self.output_path = self.cmd._mgme_dir
525 else:
526 self.output_path = output_path
527
528 self.cuttools_dir=cuttools_dir
529 self.tir_dir=tir_dir
530 self.loop_optimized_output = cmd.options['loop_optimized_output']
531
532
533 self.proliferate=True
534
535
536
537
538 def evaluate_matrix_element(self, matrix_element, p=None, options=None,
539 gauge_check=False, auth_skipping=None, output='m2',
540 PS_name = None, MLOptions={}):
541 """Calculate the matrix element and evaluate it for a phase space point
542 Output can only be 'm2'. The 'jamp' and 'amp' returned values are just
543 empty lists at this point.
544 If PS_name is not None, the written out PS.input will be saved in
545 the file PS.input_<PS_name> as well."""
546
547 process = matrix_element.get('processes')[0]
548 model = process.get('model')
549
550 if options and 'split_orders' in options.keys():
551 split_orders = options['split_orders']
552 else:
553 split_orders = -1
554
555 if "loop_matrix_elements" not in self.stored_quantities:
556 self.stored_quantities['loop_matrix_elements'] = []
557
558 if (auth_skipping or self.auth_skipping) and matrix_element in \
559 [el[0] for el in self.stored_quantities['loop_matrix_elements']]:
560
561 logger.info("Skipping %s, " % process.nice_string() + \
562 "identical matrix element already tested" )
563 return None
564
565
566 if not p:
567 p, w_rambo = self.get_momenta(process, options=options)
568
569 if matrix_element in [el[0] for el in \
570 self.stored_quantities['loop_matrix_elements']]:
571 export_dir=self.stored_quantities['loop_matrix_elements'][\
572 [el[0] for el in self.stored_quantities['loop_matrix_elements']\
573 ].index(matrix_element)][1]
574 logger.debug("Reusing generated output %s"%str(export_dir))
575 else:
576 export_dir=pjoin(self.output_path,temp_dir_prefix)
577 if os.path.isdir(export_dir):
578 if not self.proliferate:
579 raise InvalidCmd("The directory %s already exist. Please remove it."%str(export_dir))
580 else:
581 id=1
582 while os.path.isdir(pjoin(self.output_path,\
583 '%s_%i'%(temp_dir_prefix,id))):
584 id+=1
585 export_dir=pjoin(self.output_path,'%s_%i'%(temp_dir_prefix,id))
586
587 if self.proliferate:
588 self.stored_quantities['loop_matrix_elements'].append(\
589 (matrix_element,export_dir))
590
591
592
593 import madgraph.loop.loop_exporters as loop_exporters
594 if self.loop_optimized_output:
595 exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
596 else:
597 exporter_class=loop_exporters.LoopProcessExporterFortranSA
598
599 MLoptions = {'clean': True,
600 'complex_mass': self.cmass_scheme,
601 'export_format':'madloop',
602 'mp':True,
603 'SubProc_prefix':'P',
604 'compute_color_flows': not process.get('has_born'),
605 'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
606 'cuttools_dir': self.cuttools_dir,
607 'fortran_compiler': self.cmd.options['fortran_compiler'],
608 'output_dependencies': self.cmd.options['output_dependencies']}
609
610 MLoptions.update(self.tir_dir)
611
612 FortranExporter = exporter_class(export_dir, MLoptions)
613 FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
614 FortranExporter.copy_template(model)
615 FortranExporter.generate_subprocess_directory(matrix_element, FortranModel)
616 wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
617 wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
618 for c in l]))
619 FortranExporter.convert_model(model,wanted_lorentz,wanted_couplings)
620 FortranExporter.finalize(matrix_element,"",self.cmd.options, ['nojpeg'])
621
622 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
623 split_orders=split_orders)
624
625 self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
626 mp = gauge_check and self.loop_optimized_output, MLOptions=MLOptions)
627
628 if gauge_check:
629 file_path, orig_file_content, new_file_content = \
630 self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
631 ['helas_calls_ampb_1.f','loop_matrix.f'])
632 file = open(file_path,'w')
633 file.write(new_file_content)
634 file.close()
635 if self.loop_optimized_output:
636 mp_file_path, mp_orig_file_content, mp_new_file_content = \
637 self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
638 ['mp_helas_calls_ampb_1.f','mp_compute_loop_coefs.f'],mp=True)
639 mp_file = open(mp_file_path,'w')
640 mp_file.write(mp_new_file_content)
641 mp_file.close()
642
643
644 finite_m2 = self.get_me_value(process.shell_string_v4(), 0,\
645 export_dir, p, PS_name = PS_name, verbose=False)[0][0]
646
647
648 if gauge_check:
649 file = open(file_path,'w')
650 file.write(orig_file_content)
651 file.close()
652 if self.loop_optimized_output:
653 mp_file = open(mp_file_path,'w')
654 mp_file.write(mp_orig_file_content)
655 mp_file.close()
656
657
658 if not self.proliferate:
659 shutil.rmtree(export_dir)
660
661 if output == "m2":
662
663
664 return finite_m2, []
665 else:
666 return {'m2': finite_m2, output:[]}
667
668 def fix_MadLoopParamCard(self,dir_name, mp=False, loop_filter=False,
669 DoubleCheckHelicityFilter=False, MLOptions={}):
670 """ Set parameters in MadLoopParams.dat suited for these checks.MP
671 stands for multiple precision and can either be a bool or an integer
672 to specify the mode."""
673
674
675 file = open(pjoin(dir_name,'MadLoopParams.dat'), 'r')
676 MLCard = bannermod.MadLoopParam(file)
677
678 if isinstance(mp,bool):
679 mode = 4 if mp else 1
680 else:
681 mode = mp
682
683 for key, value in MLOptions.items():
684 if key == "MLReductionLib":
687 if isinstance(value,list):
688 if len(value)==0:
689 ml_reds = '1'
690 else:
691 ml_reds="|".join([str(vl) for vl in value])
692 elif isinstance(value, str):
693 ml_reds = value
694 elif isinstance(value, int):
695 ml_reds = str(value)
696 else:
697 raise MadGraph5Error, 'The argument %s '%str(value)+\
698 ' in fix_MadLoopParamCard must be a string, integer'+\
699 ' or a list.'
700 MLCard.set("MLReductionLib",ml_reds)
701 elif key == 'ImprovePS':
702 MLCard.set('ImprovePSPoint',2 if value else -1)
703 elif key == 'ForceMP':
704 mode = 4
705 elif key in MLCard:
706 MLCard.set(key,value)
707 else:
708 raise Exception, 'The MadLoop option %s specified in function'%key+\
709 ' fix_MadLoopParamCard does not correspond to an option defined in'+\
710 ' MadLoop, nor is it specially handled in this function.'
711 if not mode is None:
712 MLCard.set('CTModeRun',mode)
713 MLCard.set('CTModeInit',mode)
714 MLCard.set('UseLoopFilter',loop_filter)
715 MLCard.set('DoubleCheckHelicityFilter',DoubleCheckHelicityFilter)
716
717 MLCard.write(pjoin(dir_name,os.pardir,'SubProcesses','MadLoopParams.dat'))
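# Illustrative MLOptions dictionary (the keys are the ones handled above; the
# integer codes for MLReductionLib follow the MadLoopParam convention, e.g. 6
# for ninja as used further below in this module; 'loop_evaluator' and
# 'export_dir' are hypothetical names):
#   MLOptions = {'MLReductionLib': [6, 1], 'ImprovePS': True}
#   loop_evaluator.fix_MadLoopParamCard(pjoin(export_dir, 'Cards'),
#                                       mp=False, MLOptions=MLOptions)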
718
719 @classmethod
720 def get_me_value(cls, proc, proc_id, working_dir, PSpoint=[], PS_name = None,
721 verbose=True, format='tuple', skip_compilation=False):
722 """Compile and run ./check, then parse the output and return the result
723 for process with id = proc_id and PSpoint if specified.
724 If PS_name is not None, the written out PS.input will be saved in
725 the file PS.input_<PS_name> as well"""
726 if verbose:
727 sys.stdout.write('.')
728 sys.stdout.flush()
729
730 shell_name = None
731 directories = misc.glob('P%i_*' % proc_id, pjoin(working_dir, 'SubProcesses'))
732 if directories and os.path.isdir(directories[0]):
733 shell_name = os.path.basename(directories[0])
734
735
736 if not shell_name:
737 logging.info("Directory hasn't been created for process %s" %proc)
738 return ((0.0, 0.0, 0.0, 0.0, 0), [])
739
740 if verbose: logging.debug("Working on process %s in dir %s" % (proc, shell_name))
741
742 dir_name = pjoin(working_dir, 'SubProcesses', shell_name)
743 if not skip_compilation:
744
745 if os.path.isfile(pjoin(dir_name,'check')):
746 os.remove(pjoin(dir_name,'check'))
747 try:
748 os.remove(pjoin(dir_name,'check_sa.o'))
749 os.remove(pjoin(dir_name,'loop_matrix.o'))
750 except OSError:
751 pass
752
753 devnull = open(os.devnull, 'w')
754 retcode = subprocess.call(['make','check'],
755 cwd=dir_name, stdout=devnull, stderr=devnull)
756 devnull.close()
757
758 if retcode != 0:
759 logging.info("Error while executing make in %s" % shell_name)
760 return ((0.0, 0.0, 0.0, 0.0, 0), [])
761
762
763 if PSpoint:
764 misc.write_PS_input(pjoin(dir_name, 'PS.input'),PSpoint)
765
766
767 if not PS_name is None:
768 misc.write_PS_input(pjoin(dir_name, \
769 'PS.input_%s'%PS_name),PSpoint)
770
771 try:
772 output = subprocess.Popen('./check',
773 cwd=dir_name,
774 stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
775 output.read()
776 output.close()
777 if os.path.exists(pjoin(dir_name,'result.dat')):
778 return cls.parse_check_output(file(pjoin(dir_name,\
779 'result.dat')),format=format)
780 else:
781 logging.warning("Error while looking for file %s"%str(os.path\
782 .join(dir_name,'result.dat')))
783 return ((0.0, 0.0, 0.0, 0.0, 0), [])
784 except IOError:
785 logging.warning("Error while executing ./check in %s" % shell_name)
786 return ((0.0, 0.0, 0.0, 0.0, 0), [])
787
788 @classmethod
790 """Parse the output string and return a pair where first four values are
791 the finite, born, single and double pole of the ME and the fourth is the
792 GeV exponent and the second value is a list of 4 momenta for all particles
793 involved. Return the answer in two possible formats, 'tuple' or 'dict'."""
794
795 res_dict = {'res_p':[],
796 'born':0.0,
797 'finite':0.0,
798 '1eps':0.0,
799 '2eps':0.0,
800 'gev_pow':0,
801 'export_format':'Default',
802 'accuracy':0.0,
803 'return_code':0,
804 'Split_Orders_Names':[],
805 'Loop_SO_Results':[],
806 'Born_SO_Results':[],
807 'Born_kept':[],
808 'Loop_kept':[]
809 }
810 res_p = []
811
812
813
814 if isinstance(output,file) or isinstance(output,list):
815 text=output
816 elif isinstance(output,str):
817 text=output.split('\n')
818 else:
819 raise MadGraph5Error, 'Type for argument output not supported in'+\
820 ' parse_check_output.'
821 for line in text:
822 splitline=line.split()
823 if len(splitline)==0:
824 continue
825 elif splitline[0]=='PS':
826 res_p.append([float(s) for s in splitline[1:]])
827 elif splitline[0]=='ASO2PI':
828 res_dict['alphaS_over_2pi']=float(splitline[1])
829 elif splitline[0]=='BORN':
830 res_dict['born']=float(splitline[1])
831 elif splitline[0]=='FIN':
832 res_dict['finite']=float(splitline[1])
833 elif splitline[0]=='1EPS':
834 res_dict['1eps']=float(splitline[1])
835 elif splitline[0]=='2EPS':
836 res_dict['2eps']=float(splitline[1])
837 elif splitline[0]=='EXP':
838 res_dict['gev_pow']=int(splitline[1])
839 elif splitline[0]=='Export_Format':
840 res_dict['export_format']=splitline[1]
841 elif splitline[0]=='ACC':
842 res_dict['accuracy']=float(splitline[1])
843 elif splitline[0]=='RETCODE':
844 res_dict['return_code']=int(splitline[1])
845 elif splitline[0]=='Split_Orders_Names':
846 res_dict['Split_Orders_Names']=splitline[1:]
847 elif splitline[0] in ['Born_kept', 'Loop_kept']:
848 res_dict[splitline[0]] = [kept=='T' for kept in splitline[1:]]
849 elif splitline[0] in ['Loop_SO_Results', 'Born_SO_Results']:
850
851
852
853
854 res_dict[splitline[0]].append(\
855 ([int(el) for el in splitline[1:]],{}))
856 elif splitline[0]=='SO_Loop':
857 res_dict['Loop_SO_Results'][-1][1][splitline[1]]=\
858 float(splitline[2])
859 elif splitline[0]=='SO_Born':
860 res_dict['Born_SO_Results'][-1][1][splitline[1]]=\
861 float(splitline[2])
862
863 res_dict['res_p'] = res_p
864
865 if format=='tuple':
866 return ((res_dict['finite'],res_dict['born'],res_dict['1eps'],
867 res_dict['2eps'],res_dict['gev_pow']), res_dict['res_p'])
868 else:
869 return res_dict
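# Illustrative result.dat fragment (token names as parsed above, all numbers
# made up):
#   PS   5.0E+02 0.0E+00 0.0E+00 5.0E+02
#   BORN 1.234E-02
#   FIN  5.678E-03
#   1EPS 0.0E+00
#   2EPS 0.0E+00
#   EXP  -4
#   ACC  1.0E-10
#   RETCODE 1
# Feeding such a file (or its content split into lines) to parse_check_output
# with format='dict' would give e.g. res['born'] == 1.234e-2 and
# res['gev_pow'] == -4, while format='tuple' returns
# ((finite, born, 1eps, 2eps, gev_pow), res_p).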
870
871 @staticmethod
873 """ Changes the file model_functions.f in the SOURCE of the process output
874 so as to change how logarithms are analytically continued and see how
875 it impacts the CMS check."""
876 valid_modes = ['default','recompile']
877 if not (mode in valid_modes or (isinstance(mode, list) and
878 len(mode)==2 and all(m in ['logp','logm','log'] for m in mode))):
879 raise MadGraph5Error("Mode '%s' not reckonized"%mode+
880 " in function apply_log_tweak.")
881
882 model_path = pjoin(proc_path,'Source','MODEL')
883 directories = misc.glob('P0_*', pjoin(proc_path,'SubProcesses'))
884 if directories and os.path.isdir(directories[0]):
885 exe_path = directories[0]
886 else:
887 raise MadGraph5Error, 'Could not find a process executable '+\
888 'directory in %s'%proc_path
889 bu_path = pjoin(model_path, 'model_functions.f__backUp__')
890
891 if mode=='default':
892
893 if not os.path.isfile(bu_path):
894 raise MadGraph5Error, 'Back up file %s could not be found.'%bu_path
895 shutil.move(bu_path, pjoin(model_path, 'model_functions.f'))
896 return
897
898 if mode=='recompile':
899 try:
900 os.remove(pjoin(model_path,'model_functions.o'))
901 os.remove(pjoin(proc_path,'lib','libmodel.a'))
902 except:
903 pass
904 misc.compile(cwd=model_path)
905
906 try:
907 os.remove(pjoin(exe_path,'check'))
908 except:
909 pass
910 misc.compile(arg=['check'], cwd=exe_path)
911 return
912
913 if mode[0]==mode[1]:
914 return
915
916
917 mp_prefix = 'MP_'
918 target_line = 'FUNCTION %%sREG%s(ARG)'%mode[0].lower()
919
920
921 if not os.path.isfile(bu_path):
922 shutil.copy(pjoin(model_path, 'model_functions.f'), bu_path)
923 model_functions = open(pjoin(model_path,'model_functions.f'),'r')
924
925 new_model_functions = []
926 has_replaced = False
927 just_replaced = False
928 find_one_replacement= False
929 mp_mode = None
930 suffix = {'log':'','logp':r'\s*\+\s*TWOPII','logm':r'\s*\-\s*TWOPII'}
931 replace_regex=r'^\s*%%sREG%s\s*=\s*LOG\(ARG\)%s'%(mode[0],suffix[mode[0]])
932 for line in model_functions:
933
934 if just_replaced:
935 if not re.match(r'\s{6}', line):
936 continue
937 else:
938 just_replaced = False
939 if mp_mode is None:
940
941 new_model_functions.append(line)
942 if (target_line%mp_prefix).lower() in line.lower():
943 mp_mode = mp_prefix
944 elif (target_line%'').lower() in line.lower():
945 mp_mode = ''
946 else:
947
948 if not has_replaced and re.match(replace_regex%mp_mode,line,
949 re.IGNORECASE):
950
951 if mode[0]=='log':
952 if mp_mode=='':
953 new_line =\
954 """ if(dble(arg).lt.0.0d0.and.dimag(arg).gt.0.0d0)then
955 reg%s=log(arg) %s TWOPII
956 else
957 reg%s=log(arg)
958 endif\n"""%(mode[0],'+' if mode[1]=='logp' else '-',mode[0])
959 else:
960 new_line =\
961 """ if(real(arg,kind=16).lt.0.0e0_16.and.imagpart(arg).lt.0.0e0_16)then
962 mp_reg%s=log(arg) %s TWOPII
963 else
964 mp_reg%s=log(arg)
965 endif\n"""%(mode[0],'+' if mode[1]=='logp' else '-',mode[0])
966 else:
967 new_line = ' '*6+"%sreg%s=log(arg) %s\n"%(mp_mode,mode[0],
968 ('' if mode[1]=='log' else ('+TWOPII' if mode[1]=='logp' else '-TWOPII')))
969 new_model_functions.append(new_line)
970 just_replaced = True
971 has_replaced = True
972 find_one_replacement = True
973 else:
974 new_model_functions.append(line)
975 if re.match(r'^\s*END\s*$',line,re.IGNORECASE):
976 mp_mode = None
977 has_replaced = False
978
979 if not find_one_replacement:
980 logger.warning('No replacement was found/performed for token '+
981 "'%s->%s'."%(mode[0],mode[1]))
982 else:
983 open(pjoin(model_path,'model_functions.f'),'w').\
984 write(''.join(new_model_functions))
985 return
986
988 """ Modify loop_matrix.f so to have one external massless gauge boson
989 polarization vector turned into its momentum. It is not a pretty and
990 flexible solution but it works for this particular case."""
991
992 shell_name = None
993 directories = misc.glob('P0_*', working_dir)
994 if directories and os.path.isdir(directories[0]):
995 shell_name = os.path.basename(directories[0])
996
997 dir_name = pjoin(working_dir, shell_name)
998
999
1000 ind=0
1001 while ind<len(file_names) and not os.path.isfile(pjoin(dir_name,
1002 file_names[ind])):
1003 ind += 1
1004 if ind==len(file_names):
1005 raise Exception, "No helas calls output file found."
1006
1007 helas_file_name=pjoin(dir_name,file_names[ind])
1008 file = open(pjoin(dir_name,helas_file_name), 'r')
1009
1010 helas_calls_out=""
1011 original_file=""
1012 gaugeVectorRegExp=re.compile(\
1013 r"CALL (MP\_)?VXXXXX\(P\(0,(?P<p_id>\d+)\),((D)?CMPLX\()?ZERO((,KIND\=16)?\))?,"+
1014 r"NHEL\(\d+\),[\+\-]1\*IC\(\d+\),W\(1,(?P<wf_id>\d+(,H)?)\)\)")
1015 foundGauge=False
1016
1017 for line in file:
1018 helas_calls_out+=line
1019 original_file+=line
1020 if line.find("INCLUDE 'coupl.inc'") != -1 or \
1021 line.find("INCLUDE 'mp_coupl_same_name.inc'") !=-1:
1022 helas_calls_out+=" INTEGER WARDINT\n"
1023 if not foundGauge:
1024 res=gaugeVectorRegExp.search(line)
1025 if res!=None:
1026 foundGauge=True
1027 helas_calls_out+=" DO WARDINT=1,4\n"
1028 helas_calls_out+=" W(WARDINT+4,"+res.group('wf_id')+")="
1029 if not mp:
1030 helas_calls_out+=\
1031 "DCMPLX(P(WARDINT-1,"+res.group('p_id')+"),0.0D0)\n"
1032 else:
1033 helas_calls_out+="CMPLX(P(WARDINT-1,"+\
1034 res.group('p_id')+"),0.0E0_16,KIND=16)\n"
1035 helas_calls_out+=" ENDDO\n"
1036 file.close()
1037
1038 return pjoin(dir_name,helas_file_name), original_file, helas_calls_out
1039
1044 """Class taking care of matrix element evaluation and running timing for
1045 loop processes."""
1046
1050
1051 @classmethod
1053 """ Return a dictionary of the parameter of the MadLoopParamCard.
1054 The key is the name of the parameter and the value is the corresponding
1055 string read from the card."""
1056
1057 return bannermod.MadLoopParam(MLCardPath)
1058
1059
1060 @classmethod
1062 """ Set the parameters in MadLoopParamCard to the values specified in
1063 the dictionary params.
1064 The key is the name of the parameter and the value is the corresponding
1065 string to write in the card."""
1066
1067 MLcard = bannermod.MadLoopParam(MLCardPath)
1068 for key,value in params.items():
1069 MLcard.set(key, value, changeifuserset=False)
1070 MLcard.write(MLCardPath, commentdefault=True)
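# Minimal usage sketch (illustrative; 'card_path' is a hypothetical path to a
# MadLoopParams.dat file and 'CTModeRun' is one of the parameters set
# elsewhere in this module):
#   params = LoopMatrixElementTimer.get_MadLoop_Params(card_path)
#   LoopMatrixElementTimer.set_MadLoop_Params(card_path, {'CTModeRun': 1})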
1071
1073 """ Edit loop_matrix.f in order to skip the loop evaluation phase.
1074 Notice this only affects the double precision evaluation which is
1075 normally fine as we do not make the timing check on mp."""
1076
1077 file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
1078 loop_matrix = file.read()
1079 file.close()
1080
1081 file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
1082 loop_matrix = re.sub(r"SKIPLOOPEVAL=\S+\)","SKIPLOOPEVAL=%s)"%('.TRUE.'
1083 if skip else '.FALSE.'), loop_matrix)
1084 file.write(loop_matrix)
1085 file.close()
1086
1088 """ Edit loop_matrix.f in order to set the flag which stops the
1089 execution after booting the program (i.e. reading the color data)."""
1090
1091 file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
1092 loop_matrix = file.read()
1093 file.close()
1094
1095 file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
1096 loop_matrix = re.sub(r"BOOTANDSTOP=\S+\)","BOOTANDSTOP=%s)"%('.TRUE.'
1097 if bootandstop else '.FALSE.'), loop_matrix)
1098 file.write(loop_matrix)
1099 file.close()
1100
1101 def setup_process(self, matrix_element, export_dir, reusing = False,
1102 param_card = None, MLOptions={},clean=True):
1103 """ Output the matrix_element in argument and perform the initialization
1104 while providing some details about the output in the dictionary returned.
1105 Returns None if anything fails"""
1106
1107 infos={'Process_output': None,
1108 'HELAS_MODEL_compilation' : None,
1109 'dir_path' : None,
1110 'Initialization' : None,
1111 'Process_compilation' : None}
1112
1113 if not reusing and clean:
1114 if os.path.isdir(export_dir):
1115 clean_up(self.output_path)
1116 if os.path.isdir(export_dir):
1117 raise InvalidCmd(\
1118 "The directory %s already exist. Please remove it."\
1119 %str(export_dir))
1120 else:
1121 if not os.path.isdir(export_dir):
1122 raise InvalidCmd(\
1123 "Could not find the directory %s to reuse."%str(export_dir))
1124
1125
1126 if not reusing and clean:
1127 model = matrix_element['processes'][0].get('model')
1128
1129
1130 import madgraph.loop.loop_exporters as loop_exporters
1131 if self.loop_optimized_output:
1132 exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
1133 else:
1134 exporter_class=loop_exporters.LoopProcessExporterFortranSA
1135
1136 MLoptions = {'clean': True,
1137 'complex_mass': self.cmass_scheme,
1138 'export_format':'madloop',
1139 'mp':True,
1140 'SubProc_prefix':'P',
1141 'compute_color_flows':not matrix_element['processes'][0].get('has_born'),
1142 'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
1143 'cuttools_dir': self.cuttools_dir,
1144 'fortran_compiler':self.cmd.options['fortran_compiler'],
1145 'output_dependencies':self.cmd.options['output_dependencies']}
1146
1147 MLoptions.update(self.tir_dir)
1148
1149 start=time.time()
1150 FortranExporter = exporter_class(export_dir, MLoptions)
1151 FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
1152 FortranExporter.copy_template(model)
1153 FortranExporter.generate_subprocess_directory(matrix_element, FortranModel)
1154 wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
1155 wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
1156 for c in l]))
1157 FortranExporter.convert_model(self.full_model,wanted_lorentz,wanted_couplings)
1158 infos['Process_output'] = time.time()-start
1159 start=time.time()
1160 FortranExporter.finalize(matrix_element,"",self.cmd.options, ['nojpeg'])
1161 infos['HELAS_MODEL_compilation'] = time.time()-start
1162
1163
1164 if param_card != None:
1165 if isinstance(param_card, str):
1166 cp(pjoin(param_card),\
1167 pjoin(export_dir,'Cards','param_card.dat'))
1168 else:
1169 param_card.write(pjoin(export_dir,'Cards','param_card.dat'))
1170
1171
1172
1173 MadLoopInitializer.fix_PSPoint_in_check(
1174 pjoin(export_dir,'SubProcesses'), read_ps = False, npoints = 4)
1175
1176 self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
1177 mp = False, loop_filter = True,MLOptions=MLOptions)
1178
1179 shell_name = None
1180 directories = misc.glob('P0_*', pjoin(export_dir, 'SubProcesses'))
1181 if directories and os.path.isdir(directories[0]):
1182 shell_name = os.path.basename(directories[0])
1183 dir_name = pjoin(export_dir, 'SubProcesses', shell_name)
1184 infos['dir_path']=dir_name
1185
1186
1187
1188 if not MadLoopInitializer.need_MadLoopInit(
1189 export_dir, subproc_prefix='P'):
1190 return infos
1191
1192 attempts = [3,15]
1193
1194 try:
1195 os.remove(pjoin(dir_name,'check'))
1196 os.remove(pjoin(dir_name,'check_sa.o'))
1197 except OSError:
1198 pass
1199
1200 nPS_necessary = MadLoopInitializer.run_initialization(dir_name,
1201 pjoin(export_dir,'SubProcesses'),infos,\
1202 req_files = ['HelFilter.dat','LoopFilter.dat'],
1203 attempts = attempts)
1204 if attempts is None:
1205 logger.error("Could not compile the process %s,"%shell_name+\
1206 " try to generate it via the 'generate' command.")
1207 return None
1208 if nPS_necessary is None:
1209 logger.error("Could not initialize the process %s"%shell_name+\
1210 " with %s PS points."%max(attempts))
1211 return None
1212 elif nPS_necessary > min(attempts):
1213 logger.warning("Could not initialize the process %s"%shell_name+\
1214 " with %d PS points. It needed %d."%(min(attempts),nPS_necessary))
1215
1216 return infos
1217
1218 def time_matrix_element(self, matrix_element, reusing = False,
1219 param_card = None, keep_folder = False, options=None,
1220 MLOptions = {}):
1221 """ Output the matrix_element in argument and give detail information
1222 about the timing for its output and running"""
1223
1224
1225
1226 make_it_quick=False
1227
1228 if options and 'split_orders' in options.keys():
1229 split_orders = options['split_orders']
1230 else:
1231 split_orders = -1
1232
1233 assert ((not reusing and isinstance(matrix_element, \
1234 helas_objects.HelasMatrixElement)) or (reusing and
1235 isinstance(matrix_element, base_objects.Process)))
1236 if not reusing:
1237 proc_name = matrix_element['processes'][0].shell_string()[2:]
1238 else:
1239 proc_name = matrix_element.shell_string()[2:]
1240
1241 export_dir=pjoin(self.output_path,('SAVED' if keep_folder else '')+\
1242 temp_dir_prefix+"_%s"%proc_name)
1243
1244 res_timings = self.setup_process(matrix_element,export_dir, \
1245 reusing, param_card,MLOptions = MLOptions,clean=True)
1246
1247 if res_timings == None:
1248 return None
1249 dir_name=res_timings['dir_path']
1250
1251 def check_disk_usage(path):
1252 return subprocess.Popen("du -shc -L "+str(path), \
1253 stdout=subprocess.PIPE, shell=True).communicate()[0].split()[-2]
1254
1255
1256
1257
1258 res_timings['du_source']=check_disk_usage(pjoin(\
1259 export_dir,'Source','*','*.f'))
1260 res_timings['du_process']=check_disk_usage(pjoin(dir_name,'*.f'))
1261 res_timings['du_color']=check_disk_usage(pjoin(dir_name,
1262 'MadLoop5_resources','*.dat'))
1263 res_timings['du_exe']=check_disk_usage(pjoin(dir_name,'check'))
1264
1265 if not res_timings['Initialization']==None:
1266 time_per_ps_estimate = (res_timings['Initialization']/4.0)/2.0
1267 elif make_it_quick:
1268 time_per_ps_estimate = -1.0
1269 else:
1270
1271
1272 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1273 read_ps = False, npoints = 3, hel_config = -1,
1274 split_orders=split_orders)
1275 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1276 time_per_ps_estimate = run_time/3.0
1277
1278 self.boot_time_setup(dir_name,bootandstop=True)
1279 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1280 res_timings['Booting_time'] = run_time
1281 self.boot_time_setup(dir_name,bootandstop=False)
1282
1283
1284 contributing_hel=0
1285 n_contrib_hel=0
1286 proc_prefix_file = open(pjoin(dir_name,'proc_prefix.txt'),'r')
1287 proc_prefix = proc_prefix_file.read()
1288 proc_prefix_file.close()
1289 helicities = file(pjoin(dir_name,'MadLoop5_resources',
1290 '%sHelFilter.dat'%proc_prefix)).read().split()
1291 for i, hel in enumerate(helicities):
1292 if (self.loop_optimized_output and int(hel)>-10000) or hel=='T':
1293 if contributing_hel==0:
1294 contributing_hel=i+1
1295 n_contrib_hel += 1
1296
1297 if contributing_hel==0:
1298 logger.error("Could not find a contributing helicity "+\
1299 "configuration for process %s."%proc_name)
1300 return None
1301
1302 res_timings['n_contrib_hel']=n_contrib_hel
1303 res_timings['n_tot_hel']=len(helicities)
1304
1305
1306 if not make_it_quick:
1307 target_pspoints_number = max(int(30.0/time_per_ps_estimate)+1,50)
1308 else:
1309 target_pspoints_number = 10
1310
1311 logger.info("Checking timing for process %s "%proc_name+\
1312 "with %d PS points."%target_pspoints_number)
1313
1314 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1315 read_ps = False, npoints = target_pspoints_number*2, \
1316 hel_config = contributing_hel, split_orders=split_orders)
1317 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1318
1319 if compile_time == None: return None
1320
1321 res_timings['run_polarized_total']=\
1322 (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
1323
1324 if make_it_quick:
1325 res_timings['run_unpolarized_total'] = 1.0
1326 res_timings['ram_usage'] = 0.0
1327 else:
1328 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1329 read_ps = False, npoints = target_pspoints_number, hel_config = -1,
1330 split_orders=split_orders)
1331 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name,
1332 checkRam=True)
1333
1334 if compile_time == None: return None
1335 res_timings['run_unpolarized_total']=\
1336 (run_time-res_timings['Booting_time'])/target_pspoints_number
1337 res_timings['ram_usage'] = ram_usage
1338
1339 if not self.loop_optimized_output:
1340 return res_timings
1341
1342
1343
1344
1345
1346 self.skip_loop_evaluation_setup(dir_name,skip=True)
1347
1348 if make_it_quick:
1349 res_timings['run_unpolarized_coefs'] = 1.0
1350 else:
1351 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1352 read_ps = False, npoints = target_pspoints_number, hel_config = -1,
1353 split_orders=split_orders)
1354 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1355 if compile_time == None: return None
1356 res_timings['run_unpolarized_coefs']=\
1357 (run_time-res_timings['Booting_time'])/target_pspoints_number
1358
1359 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1360 read_ps = False, npoints = target_pspoints_number*2, \
1361 hel_config = contributing_hel, split_orders=split_orders)
1362 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1363 if compile_time == None: return None
1364 res_timings['run_polarized_coefs']=\
1365 (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
1366
1367
1368 self.skip_loop_evaluation_setup(dir_name,skip=False)
1369
1370 return res_timings
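# The dictionary returned above collects, among others, the timing entries
# 'Process_output', 'HELAS_MODEL_compilation', 'Process_compilation',
# 'Initialization', 'Booting_time', 'run_polarized_total',
# 'run_unpolarized_total' and 'run_(un)polarized_coefs', together with the
# disk-usage entries ('du_*'), 'ram_usage' and the helicity counters
# 'n_contrib_hel' / 'n_tot_hel' filled in this method.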
1371
1372
1373
1374
1375
1376 def check_matrix_element_stability(self, matrix_element,options=None,
1377 infos_IN = None, param_card = None, keep_folder = False,
1378 MLOptions = {}):
1379 """ Output the matrix_element in argument, run in for nPoints and return
1380 a dictionary containing the stability information on each of these points.
1381 If infos are provided, then the matrix element output is skipped and
1382 reused from a previous run and the content of infos.
1383 """
1384
1385 if not options:
1386 reusing = False
1387 nPoints = 100
1388 split_orders = -1
1389 else:
1390 reusing = options['reuse']
1391 nPoints = options['npoints']
1392 split_orders = options['split_orders']
1393
1394 assert ((not reusing and isinstance(matrix_element, \
1395 helas_objects.HelasMatrixElement)) or (reusing and
1396 isinstance(matrix_element, base_objects.Process)))
1397
1398
1399 def format_PS_point(ps, rotation=0):
1400 """ Write out the specified PS point to the file dir_path/PS.input
1401 while rotating it if rotation!=0. We consider only a few fixed rotations,
1402 but one could think of using rotations of arbitrary angle too.
1403 The first possibilities, 1 to 3, are rotations about the z-axis or a
1404 boost along it, so that improve_ps can still work.
1405 rotation=0 => No rotation
1406 rotation=1 => Z-axis pi/2 rotation
1407 rotation=2 => Z-axis pi/4 rotation
1408 rotation=3 => Z-axis boost
1409 rotation=4 => (x'=z,y'=-x,z'=-y)
1410 rotation=5 => (x'=-z,y'=y,z'=x)"""
1411 if rotation==0:
1412 p_out=copy.copy(ps)
1413 elif rotation==1:
1414 p_out = [[pm[0],-pm[2],pm[1],pm[3]] for pm in ps]
1415 elif rotation==2:
1416 sq2 = math.sqrt(2.0)
1417 p_out = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in ps]
1418 elif rotation==3:
1419 p_out = boost_momenta(ps, 3)
1420
1421
1422 elif rotation==4:
1423 p_out=[[pm[0],pm[3],-pm[1],-pm[2]] for pm in ps]
1424 elif rotation==5:
1425 p_out=[[pm[0],-pm[3],pm[2],pm[1]] for pm in ps]
1426 else:
1427 raise MadGraph5Error("Rotation id %i not implemented"%rotation)
1428
1429 return '\n'.join([' '.join(['%.16E'%pi for pi in p]) for p in p_out])
1430
1431 def pick_PS_point(proc, options):
1432 """ Randomly generate a PS point and make sure it is eligible. Then
1433 return it. Users can edit the cuts here if they want."""
1434
1435 p, w_rambo = self.get_momenta(proc, options)
1436 if options['events']:
1437 return p
1438
1439 while (not MatrixElementEvaluator.pass_isolation_cuts(p) and len(p)>3):
1440 p, w_rambo = self.get_momenta(proc, options)
1441
1442
1443
1444
1445 if len(p)==3:
1446 p = boost_momenta(p,3,random.uniform(0.0,0.99))
1447 return p
1448
1449
1450
1451
1452 accuracy_threshold=1.0e-1
1453
1454
1455
1456 num_rotations = 1
1457
1458 if "MLReductionLib" not in MLOptions:
1459 tools=[1]
1460 else:
1461 tools=MLOptions["MLReductionLib"]
1462 tools=list(set(tools))
1463
1464
1465 tool_var={'pjfry':2,'golem':4,'samurai':5,'ninja':6,'collier':7}
1466 for tool in ['pjfry','golem','samurai','ninja','collier']:
1467 tool_dir='%s_dir'%tool
1468 if not tool_dir in self.tir_dir:
1469 continue
1470 tool_libpath=self.tir_dir[tool_dir]
1471 tool_libname="lib%s.a"%tool
1472 if (not isinstance(tool_libpath,str)) or (not os.path.exists(tool_libpath)) \
1473 or (not os.path.isfile(pjoin(tool_libpath,tool_libname))):
1474 if tool_var[tool] in tools:
1475 tools.remove(tool_var[tool])
1476 if not tools:
1477 return None
1478
1479
1480 if not reusing:
1481 process = matrix_element['processes'][0]
1482 else:
1483 process = matrix_element
1484 proc_name = process.shell_string()[2:]
1485 export_dir=pjoin(self.mg_root,("SAVED" if keep_folder else "")+\
1486 temp_dir_prefix+"_%s"%proc_name)
1487
1488 tools_name=bannermod.MadLoopParam._ID_reduction_tool_map
1489
1490 return_dict={}
1491 return_dict['Stability']={}
1492 infos_save={'Process_output': None,
1493 'HELAS_MODEL_compilation' : None,
1494 'dir_path' : None,
1495 'Initialization' : None,
1496 'Process_compilation' : None}
1497
1498 for tool in tools:
1499 tool_name=tools_name[tool]
1500
1501
1502
1503
1504
1505 DP_stability = []
1506 QP_stability = []
1507
1508 Unstable_PS_points = []
1509
1510 Exceptional_PS_points = []
1511
1512 MLoptions=MLOptions
1513 MLoptions["MLReductionLib"]=tool
1514 clean = (tool==tools[0]) and not nPoints==0
1515 if infos_IN==None or (tool_name not in infos_IN):
1516 infos=infos_IN
1517 else:
1518 infos=infos_IN[tool_name]
1519
1520 if not infos:
1521 infos = self.setup_process(matrix_element,export_dir, \
1522 reusing, param_card,MLoptions,clean)
1523 if not infos:
1524 return None
1525
1526 if clean:
1527 infos_save['Process_output']=infos['Process_output']
1528 infos_save['HELAS_MODEL_compilation']=infos['HELAS_MODEL_compilation']
1529 infos_save['dir_path']=infos['dir_path']
1530 infos_save['Process_compilation']=infos['Process_compilation']
1531 else:
1532 if not infos['Process_output']:
1533 infos['Process_output']=infos_save['Process_output']
1534 if not infos['HELAS_MODEL_compilation']:
1535 infos['HELAS_MODEL_compilation']=infos_save['HELAS_MODEL_compilation']
1536 if not infos['dir_path']:
1537 infos['dir_path']=infos_save['dir_path']
1538 if not infos['Process_compilation']:
1539 infos['Process_compilation']=infos_save['Process_compilation']
1540
1541 dir_path=infos['dir_path']
1542
1543
1544 savefile='SavedStabilityRun_%s%%s.pkl'%tools_name[tool]
1545 data_i = 0
1546
1547 if reusing:
1548
1549 data_i=0
1550 while os.path.isfile(pjoin(dir_path,savefile%('_%d'%data_i))):
1551 pickle_path = pjoin(dir_path,savefile%('_%d'%data_i))
1552 saved_run = save_load_object.load_from_file(pickle_path)
1553 if data_i>0:
1554 logger.info("Loading additional data stored in %s."%
1555 str(pickle_path))
1556 logger.info("Loaded data moved to %s."%str(pjoin(
1557 dir_path,'LOADED_'+savefile%('_%d'%data_i))))
1558 shutil.move(pickle_path,
1559 pjoin(dir_path,'LOADED_'+savefile%('%d'%data_i)))
1560 DP_stability.extend(saved_run['DP_stability'])
1561 QP_stability.extend(saved_run['QP_stability'])
1562 Unstable_PS_points.extend(saved_run['Unstable_PS_points'])
1563 Exceptional_PS_points.extend(saved_run['Exceptional_PS_points'])
1564 data_i += 1
1565
1566 return_dict['Stability'][tool_name] = {'DP_stability':DP_stability,
1567 'QP_stability':QP_stability,
1568 'Unstable_PS_points':Unstable_PS_points,
1569 'Exceptional_PS_points':Exceptional_PS_points}
1570
1571 if nPoints==0:
1572 if len(return_dict['Stability'][tool_name]['DP_stability'])!=0:
1573
1574 if data_i>1:
1575 save_load_object.save_to_file(pjoin(dir_path,
1576 savefile%'_0'),return_dict['Stability'][tool_name])
1577 continue
1578 else:
1579 logger.info("ERROR: Not reusing a directory or any pickled"+
1580 " result for tool %s and the number"%tool_name+\
1581 " of point for the check is zero.")
1582 return None
1583
1584 logger.info("Checking stability of process %s "%proc_name+\
1585 "with %d PS points by %s."%(nPoints,tool_name))
1586 if infos['Initialization'] != None:
1587 time_per_ps_estimate = (infos['Initialization']/4.0)/2.0
1588 sec_needed = int(time_per_ps_estimate*nPoints*4)
1589 else:
1590 sec_needed = 0
1591
1592 progress_bar = None
1593 time_info = False
1594 if sec_needed>5:
1595 time_info = True
1596 logger.info("This check should take about "+\
1597 "%s to run. Started on %s."%(\
1598 str(datetime.timedelta(seconds=sec_needed)),\
1599 datetime.datetime.now().strftime("%d-%m-%Y %H:%M")))
1600 if logger.getEffectiveLevel()<logging.WARNING and \
1601 (sec_needed>5 or infos['Initialization'] == None):
1602 widgets = ['Stability check:', pbar.Percentage(), ' ',
1603 pbar.Bar(),' ', pbar.ETA(), ' ']
1604 progress_bar = pbar.ProgressBar(widgets=widgets, maxval=nPoints,
1605 fd=sys.stdout)
1606 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1607 read_ps = True, npoints = 1, hel_config = -1, split_orders=split_orders)
1608
1609
1610
1611 try:
1612 os.remove(pjoin(dir_path,'check'))
1613 os.remove(pjoin(dir_path,'check_sa.o'))
1614 except OSError:
1615 pass
1616
1617 devnull = open(os.devnull, 'w')
1618 retcode = subprocess.call(['make','check'],
1619 cwd=dir_path, stdout=devnull, stderr=devnull)
1620 devnull.close()
1621 if retcode != 0:
1622 logging.info("Error while executing make in %s" % dir_path)
1623 return None
1624
1625
1626
1627
1628 if not os.path.isfile(pjoin(dir_path,'StabilityCheckDriver.f')):
1629
1630
1631 if os.path.isfile(pjoin(dir_path,'born_matrix.f')):
1632 checkerName = 'StabilityCheckDriver.f'
1633 else:
1634 checkerName = 'StabilityCheckDriver_loop_induced.f'
1635
1636 with open(pjoin(self.mg_root,'Template','loop_material','Checks',
1637 checkerName),'r') as checkerFile:
1638 with open(pjoin(dir_path,'proc_prefix.txt')) as proc_prefix:
1639 checkerToWrite = checkerFile.read()%{'proc_prefix':
1640 proc_prefix.read()}
1641 checkerFile = open(pjoin(dir_path,'StabilityCheckDriver.f'),'w')
1642 checkerFile.write(checkerToWrite)
1643 checkerFile.close()
1644
1645
1646
1647
1648
1649 if os.path.isfile(pjoin(dir_path,'StabilityCheckDriver')):
1650 os.remove(pjoin(dir_path,'StabilityCheckDriver'))
1651 if os.path.isfile(pjoin(dir_path,'loop_matrix.o')):
1652 os.remove(pjoin(dir_path,'loop_matrix.o'))
1653 misc.compile(arg=['StabilityCheckDriver'], cwd=dir_path, \
1654 mode='fortran', job_specs = False)
1655
1656
1657
1658
1659 if len(process['legs'])==3:
1660 self.fix_MadLoopParamCard(dir_path, mp=False,
1661 loop_filter=False, DoubleCheckHelicityFilter=True)
1662
1663 StabChecker = subprocess.Popen([pjoin(dir_path,'StabilityCheckDriver')],
1664 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
1665 cwd=dir_path)
1666 start_index = len(DP_stability)
1667 if progress_bar!=None:
1668 progress_bar.start()
1669
1670
1671 interrupted = False
1672
1673
1674 retry = 0
1675
1676 i=start_index
1677 if options and 'events' in options and options['events']:
1678
1679 import MadSpin.decay as madspin
1680 fsock = open(options['events'])
1681 self.event_file = madspin.Event(fsock)
1682 while i<(start_index+nPoints):
1683
1684 qp_dict={}
1685 dp_dict={}
1686 UPS = None
1687 EPS = None
1688
1689 if retry==0:
1690 p = pick_PS_point(process, options)
1691
1692 try:
1693 if progress_bar!=None:
1694 progress_bar.update(i+1-start_index)
1695
1696 PSPoint = format_PS_point(p,0)
1697 dp_res=[]
1698 dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
1699 split_orders=split_orders))
1700 dp_dict['CTModeA']=dp_res[-1]
1701 dp_res.append(self.get_me_value(StabChecker,PSPoint,2,
1702 split_orders=split_orders))
1703 dp_dict['CTModeB']=dp_res[-1]
1704 for rotation in range(1,num_rotations+1):
1705 PSPoint = format_PS_point(p,rotation)
1706 dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
1707 split_orders=split_orders))
1708 dp_dict['Rotation%i'%rotation]=dp_res[-1]
1709
1710 if any([not res for res in dp_res]):
1711 return None
1712 dp_accuracy =((max(dp_res)-min(dp_res))/
1713 abs(sum(dp_res)/len(dp_res)))
1714 dp_dict['Accuracy'] = dp_accuracy
1715 if dp_accuracy>accuracy_threshold:
1716 if tool in [1,6]:
1717
1718 UPS = [i,p]
1719 qp_res=[]
1720 PSPoint = format_PS_point(p,0)
1721 qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
1722 split_orders=split_orders))
1723 qp_dict['CTModeA']=qp_res[-1]
1724 qp_res.append(self.get_me_value(StabChecker,PSPoint,5,
1725 split_orders=split_orders))
1726 qp_dict['CTModeB']=qp_res[-1]
1727 for rotation in range(1,num_rotations+1):
1728 PSPoint = format_PS_point(p,rotation)
1729 qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
1730 split_orders=split_orders))
1731 qp_dict['Rotation%i'%rotation]=qp_res[-1]
1732
1733 if any([not res for res in qp_res]):
1734 return None
1735
1736 qp_accuracy = ((max(qp_res)-min(qp_res))/
1737 abs(sum(qp_res)/len(qp_res)))
1738 qp_dict['Accuracy']=qp_accuracy
1739 if qp_accuracy>accuracy_threshold:
1740 EPS = [i,p]
1741 else:
1742
1743
1744 UPS = [i,p]
1745
1746 except KeyboardInterrupt:
1747 interrupted = True
1748 break
1749 except IOError, e:
1750 if e.errno == errno.EINTR:
1751 if retry==100:
1752 logger.error("Failed hundred times consecutively because"+
1753 " of system call interruptions.")
1754 raise
1755 else:
1756 logger.debug("Recovered from a system call interruption."+\
1757 "PSpoint #%i, Attempt #%i."%(i,retry+1))
1758
1759 time.sleep(0.5)
1760
1761 retry = retry+1
1762
1763 try:
1764 StabChecker.kill()
1765 except Exception:
1766 pass
1767 StabChecker = subprocess.Popen(\
1768 [pjoin(dir_path,'StabilityCheckDriver')],
1769 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
1770 stderr=subprocess.PIPE, cwd=dir_path)
1771 continue
1772 else:
1773 raise
1774
1775
1776
1777 retry = 0
1778
1779 i=i+1
1780
1781
1782 DP_stability.append(dp_dict)
1783 QP_stability.append(qp_dict)
1784 if not EPS is None:
1785 Exceptional_PS_points.append(EPS)
1786 if not UPS is None:
1787 Unstable_PS_points.append(UPS)
1788
1789 if progress_bar!=None:
1790 progress_bar.finish()
1791 if time_info:
1792 logger.info('Finished check on %s.'%datetime.datetime.now().strftime(\
1793 "%d-%m-%Y %H:%M"))
1794
1795
1796 if not interrupted:
1797 StabChecker.stdin.write('y\n')
1798 else:
1799 StabChecker.kill()
1800
1801
1802
1803
1804
1805
1806
1807 save_load_object.save_to_file(pjoin(dir_path,savefile%'_0'),\
1808 return_dict['Stability'][tool_name])
1809
1810 if interrupted:
1811 break
1812
1813 return_dict['Process'] = matrix_element.get('processes')[0] if not \
1814 reusing else matrix_element
1815 return return_dict
1816
1817 @classmethod
1818 def get_me_value(cls, StabChecker, PSpoint, mode, hel=-1, mu_r=-1.0,
1819 split_orders=-1):
1820 """ This version of get_me_value is simplified for the purpose of this
1821 class. No compilation is necessary. The CT mode can be specified."""
1822
1823
1824 StabChecker.stdin.write('\x1a')
1825 StabChecker.stdin.write('1\n')
1826 StabChecker.stdin.write('%d\n'%mode)
1827 StabChecker.stdin.write('%s\n'%PSpoint)
1828 StabChecker.stdin.write('%.16E\n'%mu_r)
1829 StabChecker.stdin.write('%d\n'%hel)
1830 StabChecker.stdin.write('%d\n'%split_orders)
1831
1832 try:
1833 while True:
1834 output = StabChecker.stdout.readline()
1835 if output != '':
1836 last_non_empty = output
1837 if output==' ##TAG#RESULT_START#TAG##\n':
1838 break
1839
1840 ret_code = StabChecker.poll()
1841 if not ret_code is None:
1842 output = StabChecker.stdout.readline()
1843 if output != '':
1844 last_non_empty = output
1845 error = StabChecker.stderr.readline()
1846 raise MadGraph5Error, \
1847 "The MadLoop stability checker crashed with return code = %d, and last output:\n\nstdout: %s\nstderr: %s\n"%\
1848 (ret_code, last_non_empty, error)
1849
1850 res = ""
1851 while True:
1852 output = StabChecker.stdout.readline()
1853 if output != '':
1854 last_non_empty = output
1855 if output==' ##TAG#RESULT_STOP#TAG##\n':
1856 break
1857 else:
1858 res += output
1859 ret_code = StabChecker.poll()
1860 if not ret_code is None:
1861 output = StabChecker.stdout.readline()
1862 if output != '':
1863 last_non_empty = output
1864 error = StabChecker.stderr.readline()
1865 raise MadGraph5Error, \
1866 "The MadLoop stability checker crashed with return code = %d, and last output:\n\nstdout: %s\nstderr: %s\n"%\
1867 (ret_code, last_non_empty, error)
1868
1869 return cls.parse_check_output(res,format='tuple')[0][0]
1870 except IOError as e:
1871 logging.warning("Error while running MadLoop. Exception = %s"%str(e))
1872 raise e
1873
1876 """ Perform a python evaluation of the matrix element independently for
1877 all possible helicity configurations for a fixed number of points N and
1878 returns the average for each in the format [[hel_config, eval],...].
1879 This is used to determine what are the vanishing and dependent helicity
1880 configurations at generation time and accordingly setup the output.
1881 This is not yet implemented at LO."""
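# Illustrative sketch of the returned structure (the numbers are made up):
# each entry pairs one helicity configuration with its averaged evaluation,
# e.g. [[hel_config_1, 2.4e-03], [hel_config_2, 0.0], ...], where a vanishing
# average flags a helicity configuration that can be dropped from the output.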
1882
1883
1884 assert isinstance(process,base_objects.Process)
1885 assert process.get('perturbation_couplings')==[]
1886
1887 N_eval=50
1888
1889 evaluator = MatrixElementEvaluator(process.get('model'), param_card,
1890 auth_skipping = False, reuse = True)
1891
1892 amplitude = diagram_generation.Amplitude(process)
1893 matrix_element = helas_objects.HelasMatrixElement(amplitude,gen_color=False)
1894
1895 cumulative_helEvals = []
1896
1897 for i in range(N_eval):
1898 p, w_rambo = evaluator.get_momenta(process)
1899 helEvals = evaluator.evaluate_matrix_element(\
1900 matrix_element, p = p, output = 'helEvals')['helEvals']
1901 if cumulative_helEvals==[]:
1902 cumulative_helEvals=copy.copy(helEvals)
1903 else:
1904 cumulative_helEvals = [[h[0],h[1]+helEvals[i][1]] for i, h in \
1905 enumerate(cumulative_helEvals)]
1906
1907
1908 cumulative_helEvals = [[h[0],h[1]/N_eval] for h in cumulative_helEvals]
1909
1910
1911
1912 clean_added_globals(ADDED_GLOBAL)
1913
1914 return cumulative_helEvals
1915
1918 """A wrapper function for running an iteration of a function over
1919 a multiprocess, without having to first create a process list
1920 (which makes a big difference for very large multiprocesses.
1921 stored_quantities is a dictionary for any quantities that we want
1922 to reuse between runs."""
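# The loop below runs over the Cartesian product of the allowed initial- and
# final-state ids of the multiprocess and calls 'function' on each distinct
# combination, using check_already_checked to skip combinations that are mere
# crossings of a process already treated.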
1923
1924 model = multiprocess.get('model')
1925 isids = [leg.get('ids') for leg in multiprocess.get('legs') \
1926 if not leg.get('state')]
1927 fsids = [leg.get('ids') for leg in multiprocess.get('legs') \
1928 if leg.get('state')]
1929
1930 id_anti_id_dict = {}
1931 for id in set(tuple(sum(isids+fsids, []))):
1932 id_anti_id_dict[id] = model.get_particle(id).get_anti_pdg_code()
1933 id_anti_id_dict[model.get_particle(id).get_anti_pdg_code()] = id
1934 sorted_ids = []
1935 results = []
for is_prod in itertools.product(*isids):
    for fs_prod in itertools.product(*fsids):
1938
1939
1940 if check_already_checked(is_prod, fs_prod, sorted_ids,
1941 multiprocess, model, id_anti_id_dict):
1942 continue
1943
1944 process = multiprocess.get_process_with_legs(base_objects.LegList(\
1945 [base_objects.Leg({'id': id, 'state':False}) for \
1946 id in is_prod] + \
1947 [base_objects.Leg({'id': id, 'state':True}) for \
1948 id in fs_prod]))
1949
1950 if opt is not None:
1951 if isinstance(opt, dict):
1952 try:
1953 value = opt[process.base_string()]
1954 except Exception:
1955 continue
1956 result = function(process, stored_quantities, value, options=options)
1957 else:
1958 result = function(process, stored_quantities, opt, options=options)
1959 else:
1960 result = function(process, stored_quantities, options=options)
1961
1962 if result:
1963 results.append(result)
1964
1965 return results
1966
1967
1968
1969
1970
def check_already_checked(is_ids, fs_ids, sorted_ids, process, model,
                          id_anti_id_dict = {}):
1973 """Check if process already checked, if so return True, otherwise add
1974 process and antiprocess to sorted_ids."""
1975
1976
1977 if id_anti_id_dict:
1978 is_ids = [id_anti_id_dict[id] for id in \
1979 is_ids]
1980 else:
1981 is_ids = [model.get_particle(id).get_anti_pdg_code() for id in \
1982 is_ids]
1983
1984 ids = array.array('i', sorted(is_ids + list(fs_ids)) + \
1985 [process.get('id')])
1986
1987 if ids in sorted_ids:
1988
1989 return True
1990
1991
1992 sorted_ids.append(ids)
1993
1994
1995 return False
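# Illustrative example (hypothetical ids): for an initial state (2, -2) and a
# final state (21, 21) with process id 0, the canonical key built above is
#     array('i', sorted([-2, 2, 21, 21]) + [0])
# i.e. the initial-state ids are replaced by their antiparticles and the whole
# list is sorted, so crossed versions of the same process map onto the same
# key and are checked only once.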
1996
2002 """ Generate a loop matrix element from the process definition, and returns
2003 it along with the timing information dictionary.
2004 If reuse is True, it reuses the already output directory if found.
2005 There is the possibility of specifying the proc_name."""
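# On success this returns (timing, matrix_element); if diagram generation
# fails or yields no diagrams it returns (elapsed_time, None); and when an
# existing output directory is reused it returns (timing, process) with all
# timing entries left to None.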
2006
2007 assert isinstance(process_definition,
2008 (base_objects.ProcessDefinition,base_objects.Process))
2009 assert process_definition.get('perturbation_couplings')!=[]
2010
2011 if isinstance(process_definition,base_objects.ProcessDefinition):
2012 if any(len(l.get('ids'))>1 for l in process_definition.get('legs')):
2013 raise InvalidCmd("This check can only be performed on single "+
2014 " processes. (i.e. without multiparticle labels).")
2015
2016 isids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
2017 if not leg.get('state')]
2018 fsids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
2019 if leg.get('state')]
2020
2021
2022 process = process_definition.get_process(isids,fsids)
2023 else:
2024 process = process_definition
2025
2026 if not output_path is None:
2027 root_path = output_path
2028 else:
2029 root_path = cmd._mgme_dir
2030
2031 timing = {'Diagrams_generation': None,
2032 'n_loops': None,
2033 'HelasDiagrams_generation': None,
2034 'n_loop_groups': None,
2035 'n_loop_wfs': None,
2036 'loop_wfs_ranks': None}
2037
2038 if proc_name:
2039 proc_dir = pjoin(root_path,proc_name)
2040 else:
2041 proc_dir = pjoin(root_path,"SAVED"+temp_dir_prefix+"_%s"%(
2042 '_'.join(process.shell_string().split('_')[1:])))
2043 if reuse and os.path.isdir(proc_dir):
2044 logger.info("Reusing directory %s"%str(proc_dir))
2045
2046 return timing, process
2047
2048 logger.info("Generating p%s"%process_definition.nice_string()[1:])
2049
2050 start=time.time()
2051 try:
2052 amplitude = loop_diagram_generation.LoopAmplitude(process,
2053 loop_filter=loop_filter)
2054 except InvalidCmd:
2055
2056
2057 return time.time()-start, None
2058 if not amplitude.get('diagrams'):
2059
2060 return time.time()-start, None
2061
2062
2063
2064 loop_optimized_output = cmd.options['loop_optimized_output']
2065 timing['Diagrams_generation']=time.time()-start
2066 timing['n_loops']=len(amplitude.get('loop_diagrams'))
2067 start=time.time()
2068
2069 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
2070 optimized_output = loop_optimized_output,gen_color=True)
2071
2072
2073
2074 matrix_element.compute_all_analytic_information()
2075 timing['HelasDiagrams_generation']=time.time()-start
2076
2077 if loop_optimized_output:
2078 timing['n_loop_groups']=len(matrix_element.get('loop_groups'))
2079 lwfs=[l for ldiag in matrix_element.get_loop_diagrams() for l in \
2080 ldiag.get('loop_wavefunctions')]
2081 timing['n_loop_wfs']=len(lwfs)
2082 timing['loop_wfs_ranks']=[]
2083 for rank in range(0,max([l.get_analytic_info('wavefunction_rank') \
2084 for l in lwfs])+1):
2085 timing['loop_wfs_ranks'].append(\
2086 len([1 for l in lwfs if \
2087 l.get_analytic_info('wavefunction_rank')==rank]))
2088
2089 return timing, matrix_element
2090
2091
2092
2093
def check_profile(process_definition, param_card = None,cuttools="",tir={},
            options = {}, cmd = FakeInterface(),output_path=None,MLOptions={}):
2096 """For a single loop process, check both its timings and then its stability
2097 in one go without regenerating it."""
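# In practice this chains generate_loop_matrix_element, time_matrix_element
# and check_matrix_element_stability on the same output directory, so the
# (potentially expensive) process generation and compilation is only done once.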
2098
2099 if 'reuse' not in options:
2100 keep_folder=False
2101 else:
2102 keep_folder = options['reuse']
2103
2104 model=process_definition.get('model')
2105
2106 timing1, matrix_element = generate_loop_matrix_element(process_definition,
2107 keep_folder,output_path=output_path,cmd=cmd)
2108 reusing = isinstance(matrix_element, base_objects.Process)
2109 options['reuse'] = reusing
2110 myProfiler = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
2111 model=model, output_path=output_path, cmd=cmd)
2112
2113 if not myProfiler.loop_optimized_output:
2114 MLoptions={}
2115 else:
2116 MLoptions=MLOptions
2117
2118 timing2 = myProfiler.time_matrix_element(matrix_element, reusing,
2119 param_card, keep_folder=keep_folder,options=options,
2120 MLOptions = MLoptions)
2121
if timing2 is None:
    return None, None

timing2['reduction_tool'] = MLoptions['MLReductionLib'][0]
2126
2127
2128 timing = dict(timing1.items()+timing2.items())
2129 stability = myProfiler.check_matrix_element_stability(matrix_element,
2130 options=options, infos_IN=timing,param_card=param_card,
2131 keep_folder = keep_folder,
2132 MLOptions = MLoptions)
2133 if stability == None:
2134 return None, None
2135 else:
2136 timing['loop_optimized_output']=myProfiler.loop_optimized_output
2137 stability['loop_optimized_output']=myProfiler.loop_optimized_output
2138 return timing, stability
2139
2140
2141
2142
def check_stability(process_definition, param_card = None,cuttools="",tir={},
2144 options=None,nPoints=100, output_path=None,
2145 cmd = FakeInterface(), MLOptions = {}):
2146 """For a single loop process, give a detailed summary of the generation and
2147 execution timing."""
2148
2149 if "reuse" in options:
2150 reuse=options['reuse']
2151 else:
2152 reuse=False
2153
2155 keep_folder = reuse
2156 model=process_definition.get('model')
2157
2158 timing, matrix_element = generate_loop_matrix_element(process_definition,
2159 reuse, output_path=output_path, cmd=cmd)
2160 reusing = isinstance(matrix_element, base_objects.Process)
2161 options['reuse'] = reusing
2162 myStabilityChecker = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
2163 output_path=output_path,model=model,cmd=cmd)
2164
2165 if not myStabilityChecker.loop_optimized_output:
2166 MLoptions = {}
2167 else:
2168 MLoptions = MLOptions
2169
2170 if 'COLLIERComputeUVpoles' not in MLoptions:
2171 MLoptions['COLLIERComputeUVpoles']=False
2172 if 'COLLIERComputeIRpoles' not in MLoptions:
2173 MLoptions['COLLIERComputeIRpoles']=False
2174
2175 if 'COLLIERRequiredAccuracy' not in MLoptions:
2176 MLoptions['COLLIERRequiredAccuracy']=1e-13
2177
2178 if 'COLLIERUseInternalStabilityTest' not in MLoptions:
2179 MLoptions['COLLIERUseInternalStabilityTest']=False
2180
2181
2182
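# COLLIER's global cache is switched off (value 0) for the stability check,
# presumably so that every re-evaluation of a phase-space point is fully
# independent of the previous ones.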
2183 MLoptions['COLLIERGlobalCache'] = 0
2184
2185 if "MLReductionLib" not in MLOptions:
2186 MLoptions["MLReductionLib"] = []
2187 if cuttools:
2188 MLoptions["MLReductionLib"].extend([1])
2189 if "iregi_dir" in tir:
2190 MLoptions["MLReductionLib"].extend([3])
2191 if "pjfry_dir" in tir:
2192 MLoptions["MLReductionLib"].extend([2])
2193 if "golem_dir" in tir:
2194 MLoptions["MLReductionLib"].extend([4])
2195 if "samurai_dir" in tir:
2196 MLoptions["MLReductionLib"].extend([5])
2197 if "ninja_dir" in tir:
2198 MLoptions["MLReductionLib"].extend([6])
2199 if "collier_dir" in tir:
2200 MLoptions["MLReductionLib"].extend([7])
2201
2202 stability = myStabilityChecker.check_matrix_element_stability(matrix_element,
2203 options=options,param_card=param_card,
2204 keep_folder=keep_folder,
2205 MLOptions=MLoptions)
2206
2207 if stability == None:
2208 return None
2209 else:
2210 stability['loop_optimized_output']=myStabilityChecker.loop_optimized_output
2211 return stability
2212
2213
2214
2215
def check_timing(process_definition, param_card= None, cuttools="",tir={},
2217 output_path=None, options={}, cmd = FakeInterface(),
2218 MLOptions = {}):
2219 """For a single loop process, give a detailed summary of the generation and
2220 execution timing."""
2221
2222 if 'reuse' not in options:
2223 keep_folder = False
2224 else:
2225 keep_folder = options['reuse']
2226 model=process_definition.get('model')
2227 timing1, matrix_element = generate_loop_matrix_element(process_definition,
2228 keep_folder, output_path=output_path, cmd=cmd)
2229 reusing = isinstance(matrix_element, base_objects.Process)
2230 options['reuse'] = reusing
2231 myTimer = LoopMatrixElementTimer(cuttools_dir=cuttools,model=model,tir_dir=tir,
2232 output_path=output_path, cmd=cmd)
2233
2234 if not myTimer.loop_optimized_output:
2235 MLoptions = {}
2236 else:
2237 MLoptions = MLOptions
2238
2239 if 'COLLIERComputeUVpoles' not in MLoptions:
2240 MLoptions['COLLIERComputeUVpoles']=False
2241 if 'COLLIERComputeIRpoles' not in MLoptions:
2242 MLoptions['COLLIERComputeIRpoles']=False
2243
2244 if 'COLLIERGlobalCache' not in MLoptions:
2245 MLoptions['COLLIERGlobalCache']=-1
2246
2247 if 'MLReductionLib' not in MLoptions or \
2248 len(MLoptions['MLReductionLib'])==0:
2249 MLoptions['MLReductionLib'] = [6]
2250
2251 timing2 = myTimer.time_matrix_element(matrix_element, reusing, param_card,
2252 keep_folder = keep_folder, options=options,
2253 MLOptions = MLoptions)
2254
2255 if timing2 == None:
2256 return None
2257 else:
2258
2259 res = dict(timing1.items()+timing2.items())
2260 res['loop_optimized_output']=myTimer.loop_optimized_output
2261 res['reduction_tool'] = MLoptions['MLReductionLib'][0]
2262 return res
2263
2264
2265
2266
def check_processes(processes, param_card = None, quick = [],cuttools="",tir={},
2268 options=None, reuse = False, output_path=None, cmd = FakeInterface()):
2269 """Check processes by generating them with all possible orderings
2270 of particles (which means different diagram building and Helas
2271 calls), and comparing the resulting matrix element values."""
2272
2273 cmass_scheme = cmd.options['complex_mass_scheme']
2274 if isinstance(processes, base_objects.ProcessDefinition):
2275
2276
2277 multiprocess = processes
2278 model = multiprocess.get('model')
2279
2280
2281 if multiprocess.get('perturbation_couplings')==[]:
2282 evaluator = MatrixElementEvaluator(model,
2283 auth_skipping = True, reuse = False, cmd = cmd)
2284 else:
2285 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
2286 model=model, auth_skipping = True,
2287 reuse = False, output_path=output_path, cmd = cmd)
2288
2289 results = run_multiprocs_no_crossings(check_process,
2290 multiprocess,
2291 evaluator,
2292 quick,
2293 options)
2294
2295 if "used_lorentz" not in evaluator.stored_quantities:
2296 evaluator.stored_quantities["used_lorentz"] = []
2297
2298 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
2299
2300 clean_up(output_path)
2301
2302 return results, evaluator.stored_quantities["used_lorentz"]
2303
2304 elif isinstance(processes, base_objects.Process):
2305 processes = base_objects.ProcessList([processes])
2306 elif isinstance(processes, base_objects.ProcessList):
2307 pass
2308 else:
2309 raise InvalidCmd("processes is of non-supported format")
2310
2311 if not processes:
2312 raise InvalidCmd("No processes given")
2313
2314 model = processes[0].get('model')
2315
2316
2317 if processes[0].get('perturbation_couplings')==[]:
2318 evaluator = MatrixElementEvaluator(model, param_card,
2319 auth_skipping = True, reuse = False, cmd = cmd)
2320 else:
2321 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
2322 model=model,param_card=param_card,
2323 auth_skipping = True, reuse = False,
2324 output_path=output_path, cmd = cmd)
2325
2326
2327
2328 sorted_ids = []
2329 comparison_results = []
2330
2331
2332 for process in processes:
2333
2334
2335 if check_already_checked([l.get('id') for l in process.get('legs') if \
2336 not l.get('state')],
2337 [l.get('id') for l in process.get('legs') if \
2338 l.get('state')],
2339 sorted_ids, process, model):
2340 continue
2341
2342 res = check_process(process, evaluator, quick, options)
2343 if res:
2344 comparison_results.append(res)
2345
2346 if "used_lorentz" not in evaluator.stored_quantities:
2347 evaluator.stored_quantities["used_lorentz"] = []
2348
2349 if processes[0].get('perturbation_couplings')!=[] and not reuse:
2350
2351 clean_up(output_path)
2352
2353 return comparison_results, evaluator.stored_quantities["used_lorentz"]
2354
2356 """Check the helas calls for a process by generating the process
2357 using all different permutations of the process legs (or, if
2358 quick, use a subset of permutations), and check that the matrix
2359 element is invariant under this."""
2360
2361 model = process.get('model')
2362
2363
2364 for i, leg in enumerate(process.get('legs')):
2365 leg.set('number', i+1)
2366
2367 logger.info("Checking crossings of %s" % \
2368 process.nice_string().replace('Process:', 'process'))
2369
2370 process_matrix_elements = []
2371
2372
2373
2374 if quick:
2375 leg_positions = [[] for leg in process.get('legs')]
2376 quick = range(1,len(process.get('legs')) + 1)
2377
2378 values = []
2379
2380
2381 number_checked=0
2382 for legs in itertools.permutations(process.get('legs')):
2383
2384 order = [l.get('number') for l in legs]
2385 if quick:
2386 found_leg = True
2387 for num in quick:
2388
2389
2390 leg_position = legs.index([l for l in legs if \
2391 l.get('number') == num][0])
2392
2393 if not leg_position in leg_positions[num-1]:
2394 found_leg = False
2395 leg_positions[num-1].append(leg_position)
2396
2397 if found_leg:
2398 continue
2399
2400
2401
2402 if quick and process.get('perturbation_couplings') and number_checked >3:
2403 continue
2404
2405 legs = base_objects.LegList(legs)
2406
2407 if order != range(1,len(legs) + 1):
2408 logger.info("Testing permutation: %s" % \
2409 order)
2410
2411 newproc = copy.copy(process)
2412 newproc.set('legs',legs)
2413
2414
2415 try:
2416 if newproc.get('perturbation_couplings')==[]:
2417 amplitude = diagram_generation.Amplitude(newproc)
2418 else:
2419
2420 loop_base_objects.cutting_method = 'optimal' if \
2421 number_checked%2 == 0 else 'default'
2422 amplitude = loop_diagram_generation.LoopAmplitude(newproc)
2423 except InvalidCmd:
2424 result=False
2425 else:
2426 result = amplitude.get('diagrams')
2427
2428 loop_base_objects.cutting_method = 'optimal'
2429
2430 if not result:
2431
2432 logging.info("No diagrams for %s" % \
2433 process.nice_string().replace('Process', 'process'))
2434 break
2435
2436 if order == range(1,len(legs) + 1):
2437
2438 p, w_rambo = evaluator.get_momenta(process, options)
2439
2440
2441 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
2442 matrix_element = helas_objects.HelasMatrixElement(amplitude,
2443 gen_color=False)
2444 else:
2445 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
2446 optimized_output=evaluator.loop_optimized_output)
2447
2448
2449
2450
2451 if amplitude.get('process').get('has_born'):
2452
2453
2454 if matrix_element in process_matrix_elements:
2455
2456
2457 continue
2458
2459 process_matrix_elements.append(matrix_element)
2460
2461 res = evaluator.evaluate_matrix_element(matrix_element, p = p,
2462 options=options)
2463 if res == None:
2464 break
2465
2466 values.append(res[0])
2467 number_checked += 1
2468
2469
2470
2471 if abs(max(values)) + abs(min(values)) > 0 and \
2472 2 * abs(max(values) - min(values)) / \
2473 (abs(max(values)) + abs(min(values))) > 0.01:
2474 break
2475
2476
2477 if not values:
2478 return None
2479
2480
2481
2482 diff = 0
2483 if abs(max(values)) + abs(min(values)) > 0:
2484 diff = 2* abs(max(values) - min(values)) / \
2485 (abs(max(values)) + abs(min(values)))
2486
2487
2488 if process.get('perturbation_couplings'):
2489 passed = diff < 1.e-5
2490 else:
2491 passed = diff < 1.e-8
2492
2493 return {"process": process,
2494 "momenta": p,
2495 "values": values,
2496 "difference": diff,
2497 "passed": passed}
2498
2500 """Clean-up the possible left-over outputs from 'evaluate_matrix element' of
2501 the LoopMatrixEvaluator (when its argument proliferate is set to true). """
2502
if mg_root is None:
    return
2505
2506 directories = misc.glob('%s*' % temp_dir_prefix, mg_root)
2507 if directories != []:
2508 logger.debug("Cleaning temporary %s* check runs."%temp_dir_prefix)
2509 for dir in directories:
2510
2511 if os.path.isdir(pjoin(dir,'SubProcesses')):
2512 shutil.rmtree(dir)
2513

def format_output(output, format):
    """ Return a string for 'output' with the specified format; if output is
    None, return 'NA' instead."""
    if output is not None:
        return format % output
    else:
        return 'NA'

def output_profile(myprocdef, stability, timing, output_path, reusing=False):
2524 """Present the results from a timing and stability consecutive check"""
2525
2526 opt = timing['loop_optimized_output']
2527
2528 text = 'Timing result for the '+('optimized' if opt else 'default')+\
2529 ' output:\n'
2530 text += output_timings(myprocdef,timing)
2531
2532 text += '\nStability result for the '+('optimized' if opt else 'default')+\
2533 ' output:\n'
2534 text += output_stability(stability,output_path, reusing=reusing)
2535
2536 mode = 'optimized' if opt else 'default'
2537 logFilePath = pjoin(output_path, 'profile_%s_%s.log'\
2538 %(mode,stability['Process'].shell_string()))
2539 logFile = open(logFilePath, 'w')
2540 logFile.write(text)
2541 logFile.close()
2542 logger.info('Log of this profile check was output to file %s'\
2543 %str(logFilePath))
2544 return text
2545
2547 """Present the result of a stability check in a nice format.
2548 The full info is printed out in 'Stability_result_<proc_shell_string>.dat'
2549 under the MadGraph5_aMC@NLO root folder (output_path)"""
2550
2551 def accuracy(eval_list):
2552 """ Compute the accuracy from different evaluations."""
2553 return (2.0*(max(eval_list)-min(eval_list))/
2554 abs(max(eval_list)+min(eval_list)))
2555
2556 def best_estimate(eval_list):
2557 """ Returns the best estimate from different evaluations."""
2558 return (max(eval_list)+min(eval_list))/2.0
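# Worked example (made-up numbers): for eval_list = [1.0e-3, 1.1e-3] the
# accuracy is 2*(1.1e-3 - 1.0e-3)/abs(2.1e-3) ~ 9.5e-2 and the best estimate
# is 1.05e-3, i.e. the relative spread around the midpoint of the extrema.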
2559
2560 def loop_direction_test_power(eval_list):
2561 """ Computes the loop direction test power P is computed as follow:
2562 P = accuracy(loop_dir_test) / accuracy(all_test)
2563 So that P is large if the loop direction test is effective.
2564 The tuple returned is (log(median(P)),log(min(P)),frac)
2565 where frac is the fraction of events with powers smaller than -3
2566 which means events for which the reading direction test shows an
2567 accuracy three digits higher than it really is according to the other
2568 tests."""
2569 powers=[]
2570 for eval in eval_list:
2571 loop_dir_evals = [eval['CTModeA'],eval['CTModeB']]
2572
2573 other_evals = [eval[key] for key in eval.keys() if key not in \
2574 ['CTModeB','Accuracy']]
2575 if accuracy(other_evals)!=0.0 and accuracy(loop_dir_evals)!=0.0:
2576 powers.append(accuracy(loop_dir_evals)/accuracy(other_evals))
2577
2578 n_fail=0
2579 for p in powers:
2580 if (math.log(p)/math.log(10))<-3:
2581 n_fail+=1
2582
2583 if len(powers)==0:
2584 return (None,None,None)
2585
2586 return (math.log(median(powers))/math.log(10),
2587 math.log(min(powers))/math.log(10),
2588 n_fail/len(powers))
2589
2590 def test_consistency(dp_eval_list, qp_eval_list):
2591 """ Computes the consistency test C from the DP and QP evaluations.
2592 C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
2593 So a consistent test would have C as close to one as possible.
2594 The tuple returned is (log(median(C)),log(min(C)),log(max(C)))"""
2595 consistencies = []
2596 for dp_eval, qp_eval in zip(dp_eval_list,qp_eval_list):
2597 dp_evals = [dp_eval[key] for key in dp_eval.keys() \
2598 if key!='Accuracy']
2599 qp_evals = [qp_eval[key] for key in qp_eval.keys() \
2600 if key!='Accuracy']
2601 if (abs(best_estimate(qp_evals)-best_estimate(dp_evals)))!=0.0 and \
2602 accuracy(dp_evals)!=0.0:
2603 consistencies.append(accuracy(dp_evals)/(abs(\
2604 best_estimate(qp_evals)-best_estimate(dp_evals))))
2605
2606 if len(consistencies)==0:
2607 return (None,None,None)
2608
2609 return (math.log(median(consistencies))/math.log(10),
2610 math.log(min(consistencies))/math.log(10),
2611 math.log(max(consistencies))/math.log(10))
2612
2613 def median(orig_list):
2614 """ Find the median of a sorted float list. """
2615 list=copy.copy(orig_list)
2616 list.sort()
2617 if len(list)%2==0:
2618 return (list[int((len(list)/2)-1)]+list[int(len(list)/2)])/2.0
2619 else:
2620 return list[int((len(list)-1)/2)]
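# Example: median([3.0, 1.0, 2.0]) returns 2.0, while for an even-length list
# such as [1.0, 2.0, 3.0, 4.0] it returns the mean of the two central values,
# i.e. 2.5.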
2621
2622
2623 f = format_output
2624
2625 opt = stability['loop_optimized_output']
2626
2627 mode = 'optimized' if opt else 'default'
2628 process = stability['Process']
2629 res_str = "Stability checking for %s (%s mode)\n"\
2630 %(process.nice_string()[9:],mode)
2631
2632 logFile = open(pjoin(output_path, 'stability_%s_%s.log'\
2633 %(mode,process.shell_string())), 'w')
2634
2635 logFile.write('Stability check results\n\n')
2636 logFile.write(res_str)
2637 data_plot_dict={}
2638 accuracy_dict={}
2639 nPSmax=0
2640 max_acc=0.0
2641 min_acc=1.0
2642 if stability['Stability']:
2643 toolnames= stability['Stability'].keys()
2644 toolnamestr=" | ".join(tn+
2645 ''.join([' ']*(10-len(tn))) for tn in toolnames)
2646 DP_stability = [[eval['Accuracy'] for eval in stab['DP_stability']] \
2647 for key,stab in stability['Stability'].items()]
2648 med_dp_stab_str=" | ".join([f(median(dp_stab),'%.2e ') for dp_stab in DP_stability])
2649 min_dp_stab_str=" | ".join([f(min(dp_stab),'%.2e ') for dp_stab in DP_stability])
2650 max_dp_stab_str=" | ".join([f(max(dp_stab),'%.2e ') for dp_stab in DP_stability])
2651 UPS = [stab['Unstable_PS_points'] for key,stab in stability['Stability'].items()]
2652 res_str_i = "\n= Tool (DoublePrec for CT)....... %s\n"%toolnamestr
2653 len_PS=["%i"%len(evals)+\
2654 ''.join([' ']*(10-len("%i"%len(evals)))) for evals in DP_stability]
2655 len_PS_str=" | ".join(len_PS)
2656 res_str_i += "|= Number of PS points considered %s\n"%len_PS_str
2657 res_str_i += "|= Median accuracy............... %s\n"%med_dp_stab_str
2658 res_str_i += "|= Max accuracy.................. %s\n"%min_dp_stab_str
2659 res_str_i += "|= Min accuracy.................. %s\n"%max_dp_stab_str
2660 pmedminlist=[]
2661 pfraclist=[]
2662 for key,stab in stability['Stability'].items():
2663 (pmed,pmin,pfrac)=loop_direction_test_power(stab['DP_stability'])
2664 ldtest_str = "%s,%s"%(f(pmed,'%.1f'),f(pmin,'%.1f'))
2665 pfrac_str = f(pfrac,'%.2e')
2666 pmedminlist.append(ldtest_str+''.join([' ']*(10-len(ldtest_str))))
2667 pfraclist.append(pfrac_str+''.join([' ']*(10-len(pfrac_str))))
2668 pmedminlist_str=" | ".join(pmedminlist)
2669 pfraclist_str=" | ".join(pfraclist)
2670 res_str_i += "|= Overall DP loop_dir test power %s\n"%pmedminlist_str
2671 res_str_i += "|= Fraction of evts with power<-3 %s\n"%pfraclist_str
2672 len_UPS=["%i"%len(upup)+\
2673 ''.join([' ']*(10-len("%i"%len(upup)))) for upup in UPS]
2674 len_UPS_str=" | ".join(len_UPS)
2675 res_str_i += "|= Number of Unstable PS points %s\n"%len_UPS_str
2676 res_str_i += \
2677 """
= Legend for the statistics of the stability tests (all logs below are log_10).
The loop direction test power P is computed as follows:
2680 P = accuracy(loop_dir_test) / accuracy(all_other_test)
2681 So that log(P) is positive if the loop direction test is effective.
2682 The tuple printed out is (log(median(P)),log(min(P)))
2683 The consistency test C is computed when QP evaluations are available:
2684 C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
2685 So a consistent test would have log(C) as close to zero as possible.
2686 The tuple printed out is (log(median(C)),log(min(C)),log(max(C)))\n"""
2687 res_str+=res_str_i
2688 for key in stability['Stability'].keys():
2689 toolname=key
2690 stab=stability['Stability'][key]
2691 DP_stability = [eval['Accuracy'] for eval in stab['DP_stability']]
2692
2693 QP_stability = [eval['Accuracy'] if eval!={} else -1.0 for eval in \
2694 stab['QP_stability']]
2695 nPS = len(DP_stability)
2696 if nPS>nPSmax:nPSmax=nPS
2697 UPS = stab['Unstable_PS_points']
2698 UPS_stability_DP = [DP_stability[U[0]] for U in UPS]
2699 UPS_stability_QP = [QP_stability[U[0]] for U in UPS]
2700 EPS = stab['Exceptional_PS_points']
2701 EPS_stability_DP = [DP_stability[E[0]] for E in EPS]
2702 EPS_stability_QP = [QP_stability[E[0]] for E in EPS]
2703 res_str_i = ""
2704
2705 xml_toolname = {'GOLEM95':'GOLEM','IREGI':'IREGI',
2706 'CUTTOOLS':'CUTTOOLS','PJFRY++':'PJFRY',
2707 'NINJA':'NINJA','SAMURAI':'SAMURAI',
2708 'COLLIER':'COLLIER'}[toolname.upper()]
2709 if len(UPS)>0:
2710 res_str_i = "\nDetails of the %d/%d UPS encountered by %s\n"\
2711 %(len(UPS),nPS,toolname)
2712 prefix = 'DP' if toolname=='CutTools' else ''
2713 res_str_i += "|= %s Median inaccuracy.......... %s\n"\
2714 %(prefix,f(median(UPS_stability_DP),'%.2e'))
2715 res_str_i += "|= %s Max accuracy............... %s\n"\
2716 %(prefix,f(min(UPS_stability_DP),'%.2e'))
2717 res_str_i += "|= %s Min accuracy............... %s\n"\
2718 %(prefix,f(max(UPS_stability_DP),'%.2e'))
2719 (pmed,pmin,pfrac)=loop_direction_test_power(\
2720 [stab['DP_stability'][U[0]] for U in UPS])
2721 if toolname=='CutTools':
2722 res_str_i += "|= UPS DP loop_dir test power.... %s,%s\n"\
2723 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2724 res_str_i += "|= UPS DP fraction with power<-3. %s\n"\
2725 %f(pfrac,'%.2e')
2726 res_str_i += "|= QP Median accuracy............ %s\n"\
2727 %f(median(UPS_stability_QP),'%.2e')
2728 res_str_i += "|= QP Max accuracy............... %s\n"\
2729 %f(min(UPS_stability_QP),'%.2e')
2730 res_str_i += "|= QP Min accuracy............... %s\n"\
2731 %f(max(UPS_stability_QP),'%.2e')
2732 (pmed,pmin,pfrac)=loop_direction_test_power(\
2733 [stab['QP_stability'][U[0]] for U in UPS])
2734 res_str_i += "|= UPS QP loop_dir test power.... %s,%s\n"\
2735 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2736 res_str_i += "|= UPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
2737 (pmed,pmin,pmax)=test_consistency(\
2738 [stab['DP_stability'][U[0]] for U in UPS],
2739 [stab['QP_stability'][U[0]] for U in UPS])
2740 res_str_i += "|= DP vs QP stab test consistency %s,%s,%s\n"\
2741 %(f(pmed,'%.1f'),f(pmin,'%.1f'),f(pmax,'%.1f'))
2742 if len(EPS)==0:
2743 res_str_i += "= Number of Exceptional PS points : 0\n"
2744 if len(EPS)>0:
2745 res_str_i = "\nDetails of the %d/%d EPS encountered by %s\n"\
2746 %(len(EPS),nPS,toolname)
2747 res_str_i += "|= DP Median accuracy............ %s\n"\
2748 %f(median(EPS_stability_DP),'%.2e')
2749 res_str_i += "|= DP Max accuracy............... %s\n"\
2750 %f(min(EPS_stability_DP),'%.2e')
2751 res_str_i += "|= DP Min accuracy............... %s\n"\
2752 %f(max(EPS_stability_DP),'%.2e')
2753 pmed,pmin,pfrac=loop_direction_test_power(\
2754 [stab['DP_stability'][E[0]] for E in EPS])
2755 res_str_i += "|= EPS DP loop_dir test power.... %s,%s\n"\
2756 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2757 res_str_i += "|= EPS DP fraction with power<-3. %s\n"\
2758 %f(pfrac,'%.2e')
2759 res_str_i += "|= QP Median accuracy............ %s\n"\
2760 %f(median(EPS_stability_QP),'%.2e')
2761 res_str_i += "|= QP Max accuracy............... %s\n"\
2762 %f(min(EPS_stability_QP),'%.2e')
2763 res_str_i += "|= QP Min accuracy............... %s\n"\
2764 %f(max(EPS_stability_QP),'%.2e')
2765 pmed,pmin,pfrac=loop_direction_test_power(\
2766 [stab['QP_stability'][E[0]] for E in EPS])
2767 res_str_i += "|= EPS QP loop_dir test power.... %s,%s\n"\
2768 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2769 res_str_i += "|= EPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
2770
2771 logFile.write(res_str_i)
2772
2773 if len(EPS)>0:
2774 logFile.write('\nFull details of the %i EPS encountered by %s.\n'\
2775 %(len(EPS),toolname))
2776 logFile.write('<EPS_data reduction=%s>\n'%xml_toolname.upper())
2777 for i, eps in enumerate(EPS):
2778 logFile.write('\nEPS #%i\n'%(i+1))
2779 logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
2780 for p in eps[1]]))
2781 logFile.write('\n DP accuracy : %.4e\n'%DP_stability[eps[0]])
2782 logFile.write(' QP accuracy : %.4e\n'%QP_stability[eps[0]])
2783 logFile.write('</EPS_data>\n')
2784 if len(UPS)>0:
2785 logFile.write('\nFull details of the %i UPS encountered by %s.\n'\
2786 %(len(UPS),toolname))
2787 logFile.write('<UPS_data reduction=%s>\n'%xml_toolname.upper())
2788 for i, ups in enumerate(UPS):
2789 logFile.write('\nUPS #%i\n'%(i+1))
2790 logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
2791 for p in ups[1]]))
2792 logFile.write('\n DP accuracy : %.4e\n'%DP_stability[ups[0]])
2793 logFile.write(' QP accuracy : %.4e\n'%QP_stability[ups[0]])
2794 logFile.write('</UPS_data>\n')
2795
2796 logFile.write('\nData entries for the stability plot.\n')
2797 logFile.write('First row is a maximal accuracy delta, second is the '+\
2798 'fraction of events with DP accuracy worse than delta.\n')
2799 logFile.write('<plot_data reduction=%s>\n'%xml_toolname.upper())
2800
2801 if max(DP_stability)>0.0:
2802 min_digit_acc=int(math.log(max(DP_stability))/math.log(10))
2803 if min_digit_acc>=0:
2804 min_digit_acc = min_digit_acc+1
2805 accuracies=[10**(-17+(i/5.0)) for i in range(5*(17+min_digit_acc)+1)]
else:
    accuracies = [10**(-17+(i/5.0)) for i in range(5*17+1)]
    logFile.writelines('%.4e %.4e\n'%(accuracies[i], 0.0) for i in \
                                                  range(len(accuracies)))
    logFile.write('</plot_data>\n')
    res_str_i += '\nPerfect accuracy over all the trial PS points. No plot'+\
                 ' is output.'
2812 logFile.write('Perfect accuracy over all the trial PS points.')
2813 res_str +=res_str_i
2814 continue
2815
2816 accuracy_dict[toolname]=accuracies
2817 if max(accuracies) > max_acc: max_acc=max(accuracies)
2818 if min(accuracies) < min_acc: min_acc=min(accuracies)
2819 data_plot=[]
2820 for acc in accuracies:
2821 data_plot.append(float(len([d for d in DP_stability if d>acc]))\
2822 /float(len(DP_stability)))
2823 data_plot_dict[toolname]=data_plot
2824
2825 logFile.writelines('%.4e %.4e\n'%(accuracies[i], data_plot[i]) for i in \
2826 range(len(accuracies)))
2827 logFile.write('</plot_data>\n')
2828 logFile.write('\nList of accuracies recorded for the %i evaluations with %s\n'\
2829 %(nPS,toolname))
2830 logFile.write('First row is DP, second is QP (if available).\n\n')
2831 logFile.write('<accuracies reduction=%s>\n'%xml_toolname.upper())
2832 logFile.writelines('%.4e '%DP_stability[i]+('NA\n' if QP_stability[i]==-1.0 \
2833 else '%.4e\n'%QP_stability[i]) for i in range(nPS))
2834 logFile.write('</accuracies>\n')
2835 res_str+=res_str_i
2836 logFile.close()
2837 res_str += "\n= Stability details of the run are output to the file"+\
2838 " stability_%s_%s.log\n"%(mode,process.shell_string())
2839
2840
2841
2842
2843 if any(isinstance(handler,logging.FileHandler) for handler in \
2844 logging.getLogger('madgraph').handlers):
2845 return res_str
2846
2847 try:
2848 import matplotlib.pyplot as plt
2849 colorlist=['b','r','g','y','m','c','k']
2850 for i,key in enumerate(data_plot_dict.keys()):
2851 color=colorlist[i]
2852 data_plot=data_plot_dict[key]
2853 accuracies=accuracy_dict[key]
2854 plt.plot(accuracies, data_plot, color=color, marker='', linestyle='-',\
2855 label=key)
2856 plt.axis([min_acc,max_acc,\
2857 10**(-int(math.log(nPSmax-0.5)/math.log(10))-1), 1])
2858 plt.yscale('log')
2859 plt.xscale('log')
2860 plt.title('Stability plot for %s (%s mode, %d points)'%\
2861 (process.nice_string()[9:],mode,nPSmax))
2862 plt.ylabel('Fraction of events')
2863 plt.xlabel('Maximal precision')
2864 plt.legend()
2865 if not reusing:
2866 logger.info('Some stability statistics will be displayed once you '+\
2867 'close the plot window')
2868 plt.show()
2869 else:
2870 fig_output_file = str(pjoin(output_path,
2871 'stability_plot_%s_%s.png'%(mode,process.shell_string())))
2872 logger.info('Stability plot output to file %s. '%fig_output_file)
2873 plt.savefig(fig_output_file)
2874 return res_str
2875 except Exception as e:
2876 if isinstance(e, ImportError):
2877 res_str += "\n= Install matplotlib to get a "+\
2878 "graphical display of the results of this check."
2879 else:
2880 res_str += "\n= Could not produce the stability plot because of "+\
2881 "the following error: %s"%str(e)
2882 return res_str
2883
2885 """Present the result of a timings check in a nice format """
2886
2887
2888 f = format_output
2889 loop_optimized_output = timings['loop_optimized_output']
2890 reduction_tool = bannermod.MadLoopParam._ID_reduction_tool_map[
2891 timings['reduction_tool']]
2892
2893 res_str = "%s \n"%process.nice_string()
2894 try:
2895 gen_total = timings['HELAS_MODEL_compilation']+\
2896 timings['HelasDiagrams_generation']+\
2897 timings['Process_output']+\
2898 timings['Diagrams_generation']+\
2899 timings['Process_compilation']+\
2900 timings['Initialization']
2901 except TypeError:
2902 gen_total = None
2903 res_str += "\n= Generation time total...... ========== %s\n"%f(gen_total,'%.3gs')
2904 res_str += "|= Diagrams generation....... %s\n"\
2905 %f(timings['Diagrams_generation'],'%.3gs')
2906 res_str += "|= Helas Diagrams generation. %s\n"\
2907 %f(timings['HelasDiagrams_generation'],'%.3gs')
2908 res_str += "|= Process output............ %s\n"\
2909 %f(timings['Process_output'],'%.3gs')
2910 res_str += "|= HELAS+model compilation... %s\n"\
2911 %f(timings['HELAS_MODEL_compilation'],'%.3gs')
2912 res_str += "|= Process compilation....... %s\n"\
2913 %f(timings['Process_compilation'],'%.3gs')
2914 res_str += "|= Initialization............ %s\n"\
2915 %f(timings['Initialization'],'%.3gs')
2916
2917 res_str += "\n= Reduction tool tested...... %s\n"%reduction_tool
2918 res_str += "\n= Helicity sum time / PSpoint ========== %.3gms\n"\
2919 %(timings['run_unpolarized_total']*1000.0)
2920 if loop_optimized_output:
2921 coef_time=timings['run_unpolarized_coefs']*1000.0
2922 loop_time=(timings['run_unpolarized_total']-\
2923 timings['run_unpolarized_coefs'])*1000.0
2924 total=coef_time+loop_time
2925 res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
2926 %(coef_time,int(round(100.0*coef_time/total)))
2927 res_str += "|= Loop evaluation time...... %.3gms (%d%%)\n"\
2928 %(loop_time,int(round(100.0*loop_time/total)))
2929 res_str += "\n= One helicity time / PSpoint ========== %.3gms\n"\
2930 %(timings['run_polarized_total']*1000.0)
2931 if loop_optimized_output:
2932 coef_time=timings['run_polarized_coefs']*1000.0
2933 loop_time=(timings['run_polarized_total']-\
2934 timings['run_polarized_coefs'])*1000.0
2935 total=coef_time+loop_time
2936 res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
2937 %(coef_time,int(round(100.0*coef_time/total)))
2938 res_str += "|= Loop evaluation time...... %.3gms (%d%%)\n"\
2939 %(loop_time,int(round(100.0*loop_time/total)))
2940 res_str += "\n= Miscellaneous ========================\n"
2941 res_str += "|= Number of hel. computed... %s/%s\n"\
2942 %(f(timings['n_contrib_hel'],'%d'),f(timings['n_tot_hel'],'%d'))
2943 res_str += "|= Number of loop diagrams... %s\n"%f(timings['n_loops'],'%d')
2944 if loop_optimized_output:
2945 res_str += "|= Number of loop groups..... %s\n"\
2946 %f(timings['n_loop_groups'],'%d')
2947 res_str += "|= Number of loop wfs........ %s\n"\
2948 %f(timings['n_loop_wfs'],'%d')
2949 if timings['loop_wfs_ranks']!=None:
2950 for i, r in enumerate(timings['loop_wfs_ranks']):
2951 res_str += "||= # of loop wfs of rank %d.. %d\n"%(i,r)
2952 res_str += "|= Loading time (Color data). ~%.3gms\n"\
2953 %(timings['Booting_time']*1000.0)
2954 res_str += "|= Maximum RAM usage (rss)... %s\n"\
2955 %f(float(timings['ram_usage']/1000.0),'%.3gMb')
2956 res_str += "\n= Output disk size =====================\n"
2957 res_str += "|= Source directory sources.. %s\n"%f(timings['du_source'],'%sb')
2958 res_str += "|= Process sources........... %s\n"%f(timings['du_process'],'%sb')
2959 res_str += "|= Color and helicity data... %s\n"%f(timings['du_color'],'%sb')
2960 res_str += "|= Executable size........... %s\n"%f(timings['du_exe'],'%sb')
2961
2962 return res_str
2963
2965 """Present the results of a comparison in a nice list format
2966 mode short: return the number of fail process
2967 """
2968 proc_col_size = 17
2969 pert_coupl = comparison_results[0]['process']['perturbation_couplings']
2970 if pert_coupl:
2971 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
2972 else:
2973 process_header = "Process"
2974
2975 if len(process_header) + 1 > proc_col_size:
2976 proc_col_size = len(process_header) + 1
2977
2978 for proc in comparison_results:
2979 if len(proc['process'].base_string()) + 1 > proc_col_size:
2980 proc_col_size = len(proc['process'].base_string()) + 1
2981
2982 col_size = 18
2983
2984 pass_proc = 0
2985 fail_proc = 0
2986 no_check_proc = 0
2987
2988 failed_proc_list = []
2989 no_check_proc_list = []
2990
2991 res_str = fixed_string_length(process_header, proc_col_size) + \
2992 fixed_string_length("Min element", col_size) + \
2993 fixed_string_length("Max element", col_size) + \
2994 fixed_string_length("Relative diff.", col_size) + \
2995 "Result"
2996
2997 for result in comparison_results:
2998 proc = result['process'].base_string()
2999 values = result['values']
3000
3001 if len(values) <= 1:
3002 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3003 " * No permutations, process not checked *"
3004 no_check_proc += 1
3005 no_check_proc_list.append(result['process'].nice_string())
3006 continue
3007
3008 passed = result['passed']
3009
3010 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3011 fixed_string_length("%1.10e" % min(values), col_size) + \
3012 fixed_string_length("%1.10e" % max(values), col_size) + \
3013 fixed_string_length("%1.10e" % result['difference'],
3014 col_size)
3015 if passed:
3016 pass_proc += 1
3017 res_str += "Passed"
3018 else:
3019 fail_proc += 1
3020 failed_proc_list.append(result['process'].nice_string())
3021 res_str += "Failed"
3022
3023 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
3024 (pass_proc, pass_proc + fail_proc,
3025 fail_proc, pass_proc + fail_proc)
3026
3027 if fail_proc != 0:
3028 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
3029 if no_check_proc != 0:
3030 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
3031
3032 return res_str
3033
3035 """Helper function to fix the length of a string by cutting it
3036 or adding extra space."""
3037
3038 if len(mystr) > length:
3039 return mystr[0:length]
3040 else:
3041 return mystr + " " * (length - len(mystr))
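# Example: fixed_string_length("Process", 10) returns "Process   " (padded to
# 10 characters), while fixed_string_length("Process", 4) returns "Proc".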
3042
3043
3044
3045
3046
def check_gauge(processes, param_card = None,cuttools="", tir={}, reuse = False,
3048 options=None, output_path=None, cmd = FakeInterface()):
3049 """Check gauge invariance of the processes by using the BRS check.
3050 For one of the massless external bosons (e.g. gluon or photon),
3051 replace the polarization vector (epsilon_mu) with its momentum (p_mu)
3052 """
3053 cmass_scheme = cmd.options['complex_mass_scheme']
3054 if isinstance(processes, base_objects.ProcessDefinition):
3055
3056
3057 multiprocess = processes
3058
3059 model = multiprocess.get('model')
3060
3061 if multiprocess.get('perturbation_couplings')==[]:
3062 evaluator = MatrixElementEvaluator(model, param_card,cmd= cmd,
3063 auth_skipping = True, reuse = False)
3064 else:
3065 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3066 cmd=cmd,model=model, param_card=param_card,
3067 auth_skipping = False, reuse = False,
3068 output_path=output_path)
3069
3070 if not cmass_scheme and multiprocess.get('perturbation_couplings')==[]:
3071
logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
3073 for particle in evaluator.full_model.get('particles'):
3074 if particle.get('width') != 'ZERO':
3075 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3076 results = run_multiprocs_no_crossings(check_gauge_process,
3077 multiprocess,
3078 evaluator,
3079 options=options
3080 )
3081
3082 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
3083
3084 clean_up(output_path)
3085
3086 return results
3087
3088 elif isinstance(processes, base_objects.Process):
3089 processes = base_objects.ProcessList([processes])
3090 elif isinstance(processes, base_objects.ProcessList):
3091 pass
3092 else:
3093 raise InvalidCmd("processes is of non-supported format")
3094
3095 assert processes, "No processes given"
3096
3097 model = processes[0].get('model')
3098
3099
3100 if processes[0].get('perturbation_couplings')==[]:
3101 evaluator = MatrixElementEvaluator(model, param_card,
3102 auth_skipping = True, reuse = False,
3103 cmd = cmd)
3104 else:
3105 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3106 model=model, param_card=param_card,
3107 auth_skipping = False, reuse = False,
3108 output_path=output_path, cmd = cmd)
3109 comparison_results = []
3110 comparison_explicit_flip = []
3111
3112
3113 for process in processes:
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123 result = check_gauge_process(process, evaluator,options=options)
3124 if result:
3125 comparison_results.append(result)
3126
3127 if processes[0].get('perturbation_couplings')!=[] and not reuse:
3128
3129 clean_up(output_path)
3130
3131 return comparison_results
3132
3135 """Check gauge invariance for the process, unless it is already done."""
3136
3137 model = process.get('model')
3138
3139
3140 found_gauge = False
3141 for i, leg in enumerate(process.get('legs')):
3142 part = model.get_particle(leg.get('id'))
3143 if part.get('spin') == 3 and part.get('mass').lower() == 'zero':
3144 found_gauge = True
3145 break
3146 if not found_gauge:
3147 logger.info("No ward identity for %s" % \
3148 process.nice_string().replace('Process', 'process'))
3149
3150 return None
3151
3152 for i, leg in enumerate(process.get('legs')):
3153 leg.set('number', i+1)
3154
3155 logger.info("Checking ward identities for %s" % \
3156 process.nice_string().replace('Process', 'process'))
3157
3158 legs = process.get('legs')
3159
3160
3161 try:
3162 if process.get('perturbation_couplings')==[]:
3163 amplitude = diagram_generation.Amplitude(process)
3164 else:
3165 amplitude = loop_diagram_generation.LoopAmplitude(process)
3166 except InvalidCmd:
3167 logging.info("No diagrams for %s" % \
3168 process.nice_string().replace('Process', 'process'))
3169 return None
3170 if not amplitude.get('diagrams'):
3171
3172 logging.info("No diagrams for %s" % \
3173 process.nice_string().replace('Process', 'process'))
3174 return None
3175
3176 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
3177 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3178 gen_color = False)
3179 else:
3180 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
3181 optimized_output=evaluator.loop_optimized_output)
3182
3183
3184
3185
3186
3187
3188
3189
3190 brsvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = True,
3191 output='jamp', options=options)
3192
3193 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
3194 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3195 gen_color = False)
3196
3197 mvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = False,
3198 output='jamp', options=options)
3199
3200 if mvalue and mvalue['m2']:
3201 return {'process':process,'value':mvalue,'brs':brsvalue}
3202
3204 """Present the results of a comparison in a nice list format"""
3205
3206 proc_col_size = 17
3207
3208 pert_coupl = comparison_results[0]['process']['perturbation_couplings']
3209
3210
3211 if pert_coupl:
3212 threshold=1e-5
3213 else:
3214 threshold=1e-10
3215
3216 if pert_coupl:
3217 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
3218 else:
3219 process_header = "Process"
3220
3221 if len(process_header) + 1 > proc_col_size:
3222 proc_col_size = len(process_header) + 1
3223
3224 for one_comp in comparison_results:
3225 proc = one_comp['process'].base_string()
3226 mvalue = one_comp['value']
3227 brsvalue = one_comp['brs']
3228 if len(proc) + 1 > proc_col_size:
3229 proc_col_size = len(proc) + 1
3230
3231 col_size = 18
3232
3233 pass_proc = 0
3234 fail_proc = 0
3235
3236 failed_proc_list = []
3237 no_check_proc_list = []
3238
3239 res_str = fixed_string_length(process_header, proc_col_size) + \
3240 fixed_string_length("matrix", col_size) + \
3241 fixed_string_length("BRS", col_size) + \
3242 fixed_string_length("ratio", col_size) + \
3243 "Result"
3244
3245 for one_comp in comparison_results:
3246 proc = one_comp['process'].base_string()
3247 mvalue = one_comp['value']
3248 brsvalue = one_comp['brs']
3249 ratio = (abs(brsvalue['m2'])/abs(mvalue['m2']))
3250 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3251 fixed_string_length("%1.10e" % mvalue['m2'], col_size)+ \
3252 fixed_string_length("%1.10e" % brsvalue['m2'], col_size)+ \
3253 fixed_string_length("%1.10e" % ratio, col_size)
3254
3255 if ratio > threshold:
3256 fail_proc += 1
3257 proc_succeed = False
3258 failed_proc_list.append(proc)
3259 res_str += "Failed"
3260 else:
3261 pass_proc += 1
3262 proc_succeed = True
3263 res_str += "Passed"
3264
3265
3266
3267
3268
3269 if len(mvalue['jamp'])!=0:
3270 for k in range(len(mvalue['jamp'][0])):
3271 m_sum = 0
3272 brs_sum = 0
3273
3274 for j in range(len(mvalue['jamp'])):
3275
3276 m_sum += abs(mvalue['jamp'][j][k])**2
3277 brs_sum += abs(brsvalue['jamp'][j][k])**2
3278
3279
3280 if not m_sum:
3281 continue
3282 ratio = abs(brs_sum) / abs(m_sum)
3283
3284 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
3285 fixed_string_length("%1.10e" % m_sum, col_size) + \
3286 fixed_string_length("%1.10e" % brs_sum, col_size) + \
3287 fixed_string_length("%1.10e" % ratio, col_size)
3288
3289 if ratio > 1e-15:
3290 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
3291 fail_proc += 1
3292 pass_proc -= 1
3293 failed_proc_list.append(proc)
3294 res_str += tmp_str + "Failed"
3295 elif not proc_succeed:
3296 res_str += tmp_str + "Passed"
3297
3298
3299 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
3300 (pass_proc, pass_proc + fail_proc,
3301 fail_proc, pass_proc + fail_proc)
3302
3303 if fail_proc != 0:
3304 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
3305
3306 if output=='text':
3307 return res_str
3308 else:
3309 return fail_proc
3310
3311
3312
def check_lorentz(processes, param_card = None,cuttools="", tir={}, options=None, \
3314 reuse = False, output_path=None, cmd = FakeInterface()):
3315 """ Check if the square matrix element (sum over helicity) is lorentz
3316 invariant by boosting the momenta with different value."""
3317
3318 cmass_scheme = cmd.options['complex_mass_scheme']
3319 if isinstance(processes, base_objects.ProcessDefinition):
3320
3321
3322 multiprocess = processes
3323 model = multiprocess.get('model')
3324
3325 if multiprocess.get('perturbation_couplings')==[]:
3326 evaluator = MatrixElementEvaluator(model,
3327 cmd= cmd, auth_skipping = False, reuse = True)
3328 else:
3329 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3330 model=model, auth_skipping = False, reuse = True,
3331 output_path=output_path, cmd = cmd)
3332
3333 if not cmass_scheme and processes.get('perturbation_couplings')==[]:
3334
logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
3336 for particle in evaluator.full_model.get('particles'):
3337 if particle.get('width') != 'ZERO':
3338 evaluator.full_model.get('parameter_dict')[\
3339 particle.get('width')] = 0.
3340
3341 results = run_multiprocs_no_crossings(check_lorentz_process,
3342 multiprocess,
3343 evaluator,
3344 options=options)
3345
3346 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
3347
3348 clean_up(output_path)
3349
3350 return results
3351
3352 elif isinstance(processes, base_objects.Process):
3353 processes = base_objects.ProcessList([processes])
3354 elif isinstance(processes, base_objects.ProcessList):
3355 pass
3356 else:
3357 raise InvalidCmd("processes is of non-supported format")
3358
3359 assert processes, "No processes given"
3360
3361 model = processes[0].get('model')
3362
3363
3364 if processes[0].get('perturbation_couplings')==[]:
3365 evaluator = MatrixElementEvaluator(model, param_card,
3366 auth_skipping = False, reuse = True,
3367 cmd=cmd)
3368 else:
3369 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
3370 model=model,param_card=param_card,
3371 auth_skipping = False, reuse = True,
3372 output_path=output_path, cmd = cmd)
3373
3374 comparison_results = []
3375
3376
3377 for process in processes:
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387 result = check_lorentz_process(process, evaluator,options=options)
3388 if result:
3389 comparison_results.append(result)
3390
3391 if processes[0].get('perturbation_couplings')!=[] and not reuse:
3392
3393 clean_up(output_path)
3394
3395 return comparison_results
3396
3399 """Check gauge invariance for the process, unless it is already done."""
3400
3401 amp_results = []
3402 model = process.get('model')
3403
3404 for i, leg in enumerate(process.get('legs')):
3405 leg.set('number', i+1)
3406
3407 logger.info("Checking lorentz transformations for %s" % \
3408 process.nice_string().replace('Process:', 'process'))
3409
3410 legs = process.get('legs')
3411
3412
3413 try:
3414 if process.get('perturbation_couplings')==[]:
3415 amplitude = diagram_generation.Amplitude(process)
3416 else:
3417 amplitude = loop_diagram_generation.LoopAmplitude(process)
3418 except InvalidCmd:
3419 logging.info("No diagrams for %s" % \
3420 process.nice_string().replace('Process', 'process'))
3421 return None
3422
3423 if not amplitude.get('diagrams'):
3424
3425 logging.info("No diagrams for %s" % \
3426 process.nice_string().replace('Process', 'process'))
3427 return None
3428
3429
3430 p, w_rambo = evaluator.get_momenta(process, options)
3431
3432
3433 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3434 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3435 gen_color = True)
3436 else:
3437 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
3438 optimized_output = evaluator.loop_optimized_output)
3439
3440 MLOptions = {'ImprovePS':True,'ForceMP':True}
3441 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3442 data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
3443 auth_skipping = True, options=options)
3444 else:
3445 data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
3446 auth_skipping = True, PS_name = 'original', MLOptions=MLOptions,
3447 options = options)
3448
3449 if data and data['m2']:
3450 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3451 results = [data]
3452 else:
3453 results = [('Original evaluation',data)]
3454 else:
3455 return {'process':process, 'results':'pass'}
3456
3457
3458
3459
3460 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3461 for boost in range(1,4):
3462 boost_p = boost_momenta(p, boost)
3463 results.append(evaluator.evaluate_matrix_element(matrix_element,
3464 p=boost_p,output='jamp'))
3465 else:
3466
3467 boost_p = boost_momenta(p, 3)
3468 results.append(('Z-axis boost',
3469 evaluator.evaluate_matrix_element(matrix_element, options=options,
3470 p=boost_p, PS_name='zBoost', output='jamp',MLOptions = MLOptions)))
3471
3472
3473
3474
3475 if not options['events']:
3476 boost_p = boost_momenta(p, 1)
3477 results.append(('X-axis boost',
3478 evaluator.evaluate_matrix_element(matrix_element, options=options,
3479 p=boost_p, PS_name='xBoost', output='jamp',MLOptions = MLOptions)))
3480 boost_p = boost_momenta(p, 2)
3481 results.append(('Y-axis boost',
3482 evaluator.evaluate_matrix_element(matrix_element,options=options,
3483 p=boost_p, PS_name='yBoost', output='jamp',MLOptions = MLOptions)))
3484
3485
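# The two evaluations below rotate every momentum about the z axis: the first
# is a pi/2 rotation mapping (px, py) -> (-py, px), the second a pi/4 rotation
# mapping (px, py) -> ((px-py)/sqrt(2), (px+py)/sqrt(2)); the energy and pz
# components are left untouched.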
3486 rot_p = [[pm[0],-pm[2],pm[1],pm[3]] for pm in p]
3487 results.append(('Z-axis pi/2 rotation',
3488 evaluator.evaluate_matrix_element(matrix_element,options=options,
3489 p=rot_p, PS_name='Rotation1', output='jamp',MLOptions = MLOptions)))
3490
3491 sq2 = math.sqrt(2.0)
3492 rot_p = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in p]
3493 results.append(('Z-axis pi/4 rotation',
3494 evaluator.evaluate_matrix_element(matrix_element,options=options,
3495 p=rot_p, PS_name='Rotation2', output='jamp',MLOptions = MLOptions)))
3496
3497
3498 return {'process': process, 'results': results}
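# The transformations applied above (a z-axis boost, optional x/y-axis boosts
# and two rotations about the z-axis) are all proper Lorentz transformations,
# so every evaluation should return the same 'm2' up to numerical noise.
# A minimal standalone sketch of the pi/2 rotation used for 'Rotation1',
# with purely illustrative momentum components [E, px, py, pz]:
#
#   p_example = [45.0, 10.0, 20.0, 35.0]
#   rotated   = [p_example[0], -p_example[2], p_example[1], p_example[3]]
#   invariant = lambda q: q[0]**2 - q[1]**2 - q[2]**2 - q[3]**2
#   assert abs(invariant(p_example) - invariant(rotated)) < 1e-9
#
# Relative differences in 'm2' beyond the thresholds used when formatting the
# results therefore point to a genuine Lorentz-invariance problem.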
3499
3500
3501
3502
3503 def check_unitary_feynman(processes_unit, processes_feynm, param_card=None,
3504 options=None, tir={}, output_path=None,
3505 cuttools="", reuse=False, cmd = FakeInterface()):
3506 """Check gauge invariance of the processes by comparing evaluations
3507 in the unitary and Feynman gauges of the model.
3508 """
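# The check below runs the same processes twice, first with
# aloha.unitary_gauge = True and then in Feynman gauge, reusing the momenta of
# the first pass, and pairs the two evaluations per process. A sketch of the
# structure returned on success (key names taken from the code below, numbers
# purely illustrative):
#
#   [ processes_unit,                       # the original process definition
#     {'process': 'e+ e- > w+ w-',          # hypothetical process string
#      'value_unit':  {'m2': 1.234e-03, 'jamp': [...], ...},
#      'value_feynm': {'m2': 1.234e-03, 'jamp': [...], ...}},
#     ... ]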
3509
3510 mg_root = cmd._mgme_dir
3511
3512 cmass_scheme = cmd.options['complex_mass_scheme']
3513
3514 if isinstance(processes_unit, base_objects.ProcessDefinition):
3515
3516
3517 multiprocess_unit = processes_unit
3518 model = multiprocess_unit.get('model')
3519
3520
3521
3522 loop_optimized_bu = cmd.options['loop_optimized_output']
3523 if processes_unit.get('squared_orders'):
3524 if processes_unit.get('perturbation_couplings') in [[],['QCD']]:
3525 cmd.options['loop_optimized_output'] = True
3526 else:
3527 raise InvalidCmd("The gauge test cannot be performed for"+
3528 " a process with corrections beyond QCD which"+
3529 " specifies squared-order constraints.")
3530 else:
3531 cmd.options['loop_optimized_output'] = False
3532
3533 aloha.unitary_gauge = True
3534 if processes_unit.get('perturbation_couplings')==[]:
3535 evaluator = MatrixElementEvaluator(model, param_card,
3536 cmd=cmd,auth_skipping = False, reuse = True)
3537 else:
3538 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3539 cmd=cmd, model=model,
3540 param_card=param_card,
3541 auth_skipping = False,
3542 output_path=output_path,
3543 reuse = False)
3544 if not cmass_scheme and multiprocess_unit.get('perturbation_couplings')==[]:
3545 logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
3546 for particle in evaluator.full_model.get('particles'):
3547 if particle.get('width') != 'ZERO':
3548 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3549
3550 output_u = run_multiprocs_no_crossings(get_value,
3551 multiprocess_unit,
3552 evaluator,
3553 options=options)
3554
3555 clean_added_globals(ADDED_GLOBAL)
3556
3557 if processes_unit.get('perturbation_couplings')!=[]:
3558 clean_up(output_path)
3559
3560 momentum = {}
3561 for data in output_u:
3562 momentum[data['process']] = data['p']
3563
3564 multiprocess_feynm = processes_feynm
3565 model = multiprocess_feynm.get('model')
3566
3567
3568 aloha.unitary_gauge = False
3569
3570
3571 cmd.options['loop_optimized_output'] = True
3572 if processes_feynm.get('perturbation_couplings')==[]:
3573 evaluator = MatrixElementEvaluator(model, param_card,
3574 cmd= cmd, auth_skipping = False, reuse = False)
3575 else:
3576 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3577 cmd= cmd, model=model,
3578 param_card=param_card,
3579 auth_skipping = False,
3580 output_path=output_path,
3581 reuse = False)
3582
3583 if not cmass_scheme and multiprocess_feynm.get('perturbation_couplings')==[]:
3584
3585 for particle in evaluator.full_model.get('particles'):
3586 if particle.get('width') != 'ZERO':
3587 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3588
3589 output_f = run_multiprocs_no_crossings(get_value, multiprocess_feynm,
3590 evaluator, momentum,
3591 options=options)
3592 output = [processes_unit]
3593 for data in output_f:
3594 local_dico = {}
3595 local_dico['process'] = data['process']
3596 local_dico['value_feynm'] = data['value']
3597 local_dico['value_unit'] = [d['value'] for d in output_u
3598 if d['process'] == data['process']][0]
3599 output.append(local_dico)
3600
3601 if processes_feynm.get('perturbation_couplings')!=[] and not reuse:
3602
3603 clean_up(output_path)
3604
3605
3606 cmd.options['loop_optimized_output'] = loop_optimized_bu
3607
3608 return output
3609
3610
3611
3612
3613 else:
3614 raise InvalidCmd("processes are not in a supported format")
3615
3621 """Check complex mass scheme consistency in the offshell region of s-channels
3622 detected for this process, by varying the expansion parameter consistently
3623 with the corresponding width and making sure that the difference between
3624 the complex-mass scheme and the narrow-width approximation is of higher order.
3625 """
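# In practice the check rescales the expansion parameters (and, in the CMS run,
# the widths) by a factor lambdaCMS and compares the CMS and NWA results as
# lambdaCMS goes to zero. A rough sketch of the criterion, with purely
# illustrative numbers and an assumed leading scaling |M|^2 ~ lambdaCMS^2:
#
#   lam      = 1.0e-3
#   m2_nwa   = 4.0e-08 * lam**2                    # hypothetical NWA value
#   m2_cms   = 4.0e-08 * lam**2 * (1.0 + 0.3*lam)  # CMS differs at higher order
#   rel_diff = abs(m2_cms - m2_nwa) / m2_nwa       # ~ 0.3*lam, shrinks with lam
#
# If the relative difference did not vanish with lambdaCMS, the complex-mass
# scheme implementation of the model would be suspect.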
3626
3627 if not isinstance(process_line, str):
3628 raise InvalidCmd("Process definition must be given as a string for this check")
3629
3630
3631 cmd.do_set('complex_mass_scheme False', log=False)
3632
3633 multiprocess_nwa = cmd.extract_process(process_line)
3634
3635
3636 has_FRdecay = os.path.isfile(pjoin(cmd._curr_model.get('modelpath'),
3637 'decays.py'))
3638
3639
3640 missing_perturbations = cmd._curr_model.get_coupling_orders()-\
3641 set(multiprocess_nwa.get('perturbation_couplings'))
3642
3643 if len(multiprocess_nwa.get('perturbation_couplings'))>0 and \
3644 len(missing_perturbations)>0:
3645 logger.warning("------------------------------------------------------")
3646 logger.warning("The process considered does not specify the following "+
3647 "types of loops to be included: %s"%str(list(missing_perturbations)))
3648 logger.warning("Consequently, the CMS check will be unsuccessful if the"+
3649 " process involves any resonating particle whose LO decay is "+
3650 "mediated by one of these orders.")
3651 logger.warning("You can use the syntax '[virt=all]' to automatically"+
3652 " include all loops supported by the model.")
3653 logger.warning("------------------------------------------------------")
3654
3655 if len(multiprocess_nwa.get('perturbation_couplings'))>0 and \
3656 len(multiprocess_nwa.get('legs'))<=4:
3657 logger.warning("------------------------------------------------------")
3658 logger.warning("Processes with four or fewer external states are typically not"+\
3659 " sensitive to incorrect Complex Mass Scheme implementations.")
3660 logger.warning("You can test this sensitivity by making sure that the"+
3661 " same check on the leading-order counterpart of this process *fails*"+
3662 " when using the option '--diff_lambda_power=2'.")
3663 logger.warning("If it does not, then consider adding a massless "+
3664 "gauge vector to the external states.")
3665 logger.warning("------------------------------------------------------")
3666
3667 if options['recompute_width']=='auto':
3668 if multiprocess_nwa.get('perturbation_couplings')!=[]:
3669
3670 options['recompute_width'] = 'first_time'
3671 else:
3672 options['recompute_width'] = 'never'
3673
3674
3675 if options['recompute_width'] in ['first_time', 'always'] and \
3676 not has_FRdecay and not 'cached_widths' in options:
3677 logger.info('The LO widths will need to be recomputed but the '+
3678 'model considered does not appear to have a decay module.\nThe widths'+
3679 ' will need to be computed numerically, which will slow down the test.\n'+
3680 'Consider using a param_card already specifying correct LO widths and'+
3681 " adding the option --recompute_width=never when doing this check.")
3682
3683 if options['recompute_width']=='never' and \
3684 any(order in multiprocess_nwa.get('perturbation_couplings') for order in
3685 options['expansion_orders']):
3686 logger.warning('You chose not to recompute the widths while including'+
3687 ' loop corrections. The check will be successful only if the width'+\
3688 ' specified in the default param_card is LO-accurate (remember that'+\
3689 ' the default values of alpha_s and awem1 are 0.1 and 10.0'+\
3690 ' respectively).')
3691
3692
3693
3694
3695
3696 if options['recompute_width'] in ['first_time', 'always'] and has_FRdecay:
3697 modelname = cmd._curr_model.get('modelpath+restriction')
3698 with misc.MuteLogger(['madgraph'], ['INFO']):
3699 model = import_ufo.import_model(modelname, decay=True,
3700 complex_mass_scheme=False)
3701 multiprocess_nwa.set('model', model)
3702
3703 run_options = copy.deepcopy(options)
3704
3705
3706 if options['seed'] > 0:
3707 random.seed(options['seed'])
3708
3709
3710 run_options['param_card'] = param_card
3711 if isinstance(cmd, FakeInterface):
3712 raise MadGraph5Error, "Check CMS cannot be run with a FakeInterface."
3713 run_options['cmd'] = cmd
3714 run_options['MLOptions'] = MLOptions
3715 if output_path:
3716 run_options['output_path'] = output_path
3717 else:
3718 run_options['output_path'] = cmd._mgme_dir
3719
3720
3721 run_options['has_FRdecay'] = has_FRdecay
3722
3723
3724 if 'cached_widths' not in run_options:
3725 run_options['cached_widths'] = {}
3726
3727
3728 run_options['cached_param_card'] = {'NWA':[None,None],'CMS':[None,None]}
3729
3730 if options['tweak']['name']:
3731 logger.info("Now running the CMS check for tweak '%s'"\
3732 %options['tweak']['name'])
3733
3734 model = multiprocess_nwa.get('model')
3735
3736 for particle in model.get('particles'):
3737 mass_param = model.get_parameter(particle.get('mass'))
3738 if particle.get('mass')!='ZERO' and 'external' not in mass_param.depend:
3739 if model.get('name') not in ['sm','loop_sm']:
3740 logger.warning("The mass '%s' of particle '%s' is not an external"%\
3741 (model.get_parameter(particle.get('mass')).name,particle.get('name'))+\
3742 " parameter as required by this check. \nMG5_aMC will try to"+\
3743 " modify the model to remedy the situation. No guarantee.")
3744 status = model.change_electroweak_mode(set(['mz','mw','alpha']))
3745 if not status:
3746 raise InvalidCmd('The EW scheme could apparently not be changed'+\
3747 ' so as to have the W-boson mass external. The check cannot'+\
3748 ' proceed.')
3749 break
3750
3751 veto_orders = [order for order in model.get('coupling_orders') if \
3752 order not in options['expansion_orders']]
3753 if len(veto_orders)>0:
3754 logger.warning('You did not define any parameter scaling rule for the'+\
3755 " coupling orders %s. They will be "%','.join(veto_orders)+\
3756 "forced to zero in the tests. Consider adding a scaling rule to"+\
3757 " avoid this (see option '--cms' in 'help check').")
3758 for order in veto_orders:
3759 multiprocess_nwa.get('orders')[order] = 0
3760 multiprocess_nwa.set('perturbation_couplings', [order for order in
3761 multiprocess_nwa['perturbation_couplings'] if order not in veto_orders])
3762
3763 if multiprocess_nwa.get('perturbation_couplings')==[]:
3764 evaluator = MatrixElementEvaluator(model, param_card,
3765 cmd=cmd,auth_skipping = False, reuse = True)
3766 else:
3767 evaluator = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
3768 cmd=cmd, model=model,
3769 param_card=param_card,
3770 auth_skipping = False,
3771 output_path=output_path,
3772 reuse = False)
3773
3774 cached_information = []
3775 output_nwa = run_multiprocs_no_crossings(check_complex_mass_scheme_process,
3776 multiprocess_nwa,
3777 evaluator,
3778
3779
3780
3781
3782
3783 opt = cached_information,
3784 options=run_options)
3785
3786
3787 clean_added_globals(ADDED_GLOBAL)
3788
3789
3790 cmd.do_set('complex_mass_scheme True', log=False)
3791
3792
3793 multiprocess_cms = cmd.extract_process(process_line)
3794 model = multiprocess_cms.get('model')
3795
3796 if len(veto_orders)>0:
3797 for order in veto_orders:
3798 multiprocess_cms.get('orders')[order] = 0
3799 multiprocess_cms.set('perturbation_couplings', [order for order in
3800 multiprocess_cms['perturbation_couplings'] if order not in veto_orders])
3801
3802 if multiprocess_cms.get('perturbation_couplings')==[]:
3803 evaluator = MatrixElementEvaluator(model, param_card,
3804 cmd=cmd,auth_skipping = False, reuse = True)
3805 else:
3806 evaluator = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
3807 cmd=cmd, model=model,
3808 param_card=param_card,
3809 auth_skipping = False,
3810 output_path=output_path,
3811 reuse = False)
3812
3813 output_cms = run_multiprocs_no_crossings(check_complex_mass_scheme_process,
3814 multiprocess_cms,
3815 evaluator,
3816
3817 opt = dict(cached_information),
3818 options=run_options)
3819
3820 if multiprocess_cms.get('perturbation_couplings')!=[] and not options['reuse']:
3821
3822 clean_up(output_path)
3823
3824
3825
3826
3827 result = {'ordered_processes':[],'lambdaCMS':options['lambdaCMS']}
3828
3829 result['perturbation_orders']=multiprocess_nwa.get('perturbation_couplings')
3830 for i, proc_res in enumerate(output_nwa):
3831 result['ordered_processes'].append(proc_res[0])
3832 result[proc_res[0]] = {
3833 'NWA':proc_res[1]['resonances_result'],
3834 'CMS':output_cms[i][1]['resonances_result'],
3835 'born_order':proc_res[1]['born_order'],
3836 'loop_order':proc_res[1]['loop_order']}
3837
3838
3839
3840 options['cached_widths'] = run_options['cached_widths']
3841
3842
3843 result['recompute_width'] = options['recompute_width']
3844 result['has_FRdecay'] = has_FRdecay
3845 result['widths_computed'] = []
3846 cached_widths = sorted(options['cached_widths'].items(), key=lambda el: \
3847 abs(el[0][0]))
3848 for (pdg, lambda_value), width in cached_widths:
3849 if lambda_value != 1.0:
3850 continue
3851 result['widths_computed'].append((model.get_particle(pdg).get_name(),
3852 width))
3853
3854
3855 clean_added_globals(ADDED_GLOBAL)
3856
3857 return result
3858
3863 """Check CMS for the process in argument. The options 'opt' is quite important.
3864 When opt is a list, it means that we are doing NWA and we are filling the
3865 list with the following tuple
3866 ('proc_name',({'ParticlePDG':ParticlePDG,
3867 'FSMothersNumbers':set([]),
3868 'PS_point_used':[]},...))
3869 When opt is a dictionary, we are in the CMS mode and it will be reused then.
3870 """
3871
3872
3873
3874 NLO = process.get('perturbation_couplings') != []
3875
3876 def glue_momenta(production, decay):
3877 """ Merge together the kinematics for the production of the particle
3878 positioned last in the 'production' array with the 1>N 'decay' kinematics
3879 provided, where the decaying particle comes first."""
3880
3881 from MadSpin.decay import momentum
3882
3883 full = production[:-1]
3884
3885
3886
3887
3888
3889 for p in decay[1:]:
3890 bp = momentum(*p).boost(momentum(*production[-1]))
3891 full.append([bp.E,bp.px,bp.py,bp.pz])
3892
3893 return full
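# Hypothetical usage sketch (momenta given as [E, px, py, pz] lists): a 2 > 3
# production whose last entry is the off-shell resonance, glued to a 1 > 2
# decay of that resonance generated in its own frame. The resonance momentum
# production[-1] is dropped and replaced by its boosted decay products, so the
# result contains len(production) - 1 + len(decay) - 1 momenta:
#
#   production = [p_in1, p_in2, p_out1, p_out2, p_resonance]
#   decay      = [p_resonance_rest, p_dec1, p_dec2]
#   full       = glue_momenta(production, decay)   # 6 momenta in this example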
3894
3895 def find_resonances(diagrams):
3896 """ Find all the resonances in the matrix element in argument """
3897
3898 model = process['model']
3899 resonances_found = []
3900
3901 for ll, diag in enumerate(diagrams):
3902 for amp in diag.get('amplitudes'):
3903
3904
3905 s_channels, t_channels = amp.\
3906 get_s_and_t_channels(process.get_ninitial(), model, 0)
3907
3908
3909 replacement_dict = {}
3910 for s_channel in s_channels:
3911 new_resonance = {
3912 'ParticlePDG':s_channel.get('legs')[-1].get('id'),
3913 'FSMothersNumbers':[],
3914 'PS_point_used':[]}
3915 for leg in s_channel.get('legs')[:-1]:
3916 if leg.get('number')>0:
3917 new_resonance['FSMothersNumbers'].append(
3918 leg.get('number'))
3919 else:
3920 try:
3921 new_resonance['FSMothersNumbers'].extend(
3922 replacement_dict[leg.get('number')])
3923 except KeyError:
3924 raise Exception, 'The following diagram '+\
3925 'is malformed:'+diag.nice_string()
3926
3927 replacement_dict[s_channel.get('legs')[-1].get('number')] = \
3928 new_resonance['FSMothersNumbers']
3929 new_resonance['FSMothersNumbers'] = set(
3930 new_resonance['FSMothersNumbers'])
3931 if new_resonance not in resonances_found:
3932 resonances_found.append(new_resonance)
3933
3934
3935 kept_resonances = []
3936 for resonance in resonances_found:
3937
3938 if resonance['ParticlePDG'] == 0:
3939 continue
3940
3941
3942 if abs(resonance['ParticlePDG']) in \
3943 [abs(l.get('id')) for l in process.get('legs')]:
3944 continue
3945
3946 mass_string = evaluator.full_model.get_particle(
3947 resonance['ParticlePDG']).get('mass')
3948 mass = evaluator.full_model.get('parameter_dict')[mass_string].real
3949
3950 if mass==0.0:
3951 continue
3952
3953 width_string = evaluator.full_model.get_particle(
3954 resonance['ParticlePDG']).get('width')
3955 width = evaluator.full_model.get('parameter_dict')[width_string].real
3956
3957
3958 if width==0.0:
3959 continue
3960
3961 final_state_energy = sum(
3962 evaluator.full_model.get('parameter_dict')[
3963 evaluator.full_model.get_particle(l.get('id')).get('mass')].real
3964 for l in process.get('legs') if l.get('number') in
3965 resonance['FSMothersNumbers'])
3966
3967
3968 special_mass = (1.0 + options['offshellness'])*mass
3969
3970
3971 if special_mass<final_state_energy:
3972 raise InvalidCmd('The offshellness specified (%s) is such'\
3973 %options['offshellness']+' that the resulting kinematic configuration is '+\
3974 'impossible for resonance %s %s.'%(evaluator.full_model.
3975 get_particle(resonance['ParticlePDG']).get_name(),
3976 str(list(resonance['FSMothersNumbers']))))
3977 continue
3978
3979
3980 kept_resonances.append(resonance)
3981
3982 for resonance in kept_resonances:
3983
3984 set_PSpoint(resonance, force_other_res_offshell=kept_resonances)
3985
3986
3987
3988 return tuple(kept_resonances)
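# Each entry of the returned tuple is a resonance dictionary, e.g.
# (illustrative values only):
#
#   {'ParticlePDG': 23,                        # an s-channel Z boson
#    'FSMothersNumbers': set([3, 4]),          # final-state legs it decays into
#    'PS_point_used': [[E, px, py, pz], ...],  # filled by set_PSpoint above
#    'offshellnesses': [...]}                  # also filled by set_PSpoint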
3989
3990 def set_PSpoint(resonance, force_other_res_offshell=[],
3991 allow_energy_increase=1.5, isolation_cuts=True):
3992 """ Starting from the specified resonance, construct a phase space point
3993 for it and possibly also enforce other resonances to be offshell. Possibly
3994 allow the energy to be progressively increased by the factor specified
3995 (negative float to forbid it) and possibly enforce default isolation cuts
3996 as well."""
3997
3998 def invmass(momenta):
3999 """ Computes the invariant mass of a list of momenta."""
4000 ptot = [sum(p[i] for p in momenta) for i in range(4)]
4001 return math.sqrt(ptot[0]**2-ptot[1]**2-ptot[2]**2-ptot[3]**2)
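# Quick illustration (arbitrary numbers): two back-to-back massless momenta of
# energy 50 GeV have an invariant mass of 100 GeV, i.e.
#   invmass([[50., 0., 0., 50.], [50., 0., 0., -50.]])  ->  100.0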
4002
4003 model = evaluator.full_model
4004 def getmass(pdg):
4005 """ Returns the mass of a particle given the current model and its
4006 pdg given in argument."""
4007 return model.get('parameter_dict')[
4008 model.get_particle(pdg).get('mass')].real
4009
4010 N_trials = 0
4011 max_trial = 1e4
4012 nstep_for_energy_increase = 1e3
4013 PS_point_found = None
4014 if options['offshellness'] > 0.0:
4015 offshellness = options['offshellness']
4016 else:
4017
4018
4019
4020
4021 offshellness = (0.25*(options['offshellness']+1.0))-1.0
4022
4023
4024
4025
4026 if options['offshellness'] < 0.0:
4027 energy_increase = math.sqrt(allow_energy_increase)
4028 else:
4029 energy_increase = allow_energy_increase
4030
4031 other_res_offshell = [res for res in force_other_res_offshell if
4032 res!=resonance]
4033
4034
4035
4036 all_other_res_masses = [getmass(res['ParticlePDG'])
4037 for res in other_res_offshell]
4038 resonance_mass = getmass(resonance['ParticlePDG'])
4039
4040 str_res = '%s %s'%(model.get_particle(
4041 resonance['ParticlePDG']).get_name(),
4042 str(list(resonance['FSMothersNumbers'])))
4043 leg_number_to_leg = dict((l.get('number'),l) for l in process.get('legs'))
4044
4045
4046
4047 daughter_masses = sum(getmass(leg_number_to_leg[\
4048 number].get('id')) for number in resonance['FSMothersNumbers'])
4049 min_offshellness = 4.0*((daughter_masses*1.2)/resonance_mass)-1.0
4050
4051
4052
4053 min_energy = max(sum(getmass(l.get('id')) for l in \
4054 process.get('legs') if l.get('state')==True),
4055 sum(getmass(l.get('id')) for l in \
4056 process.get('legs') if l.get('state')==False))
4057
4058
4059
4060 daughter_offshellnesses = [(1.0+options['offshellness'])*mass
4061 for i, mass in enumerate(all_other_res_masses) if
4062 other_res_offshell[i]['FSMothersNumbers'].issubset(
4063 resonance['FSMothersNumbers'])]
4064
4065 if options['offshellness'] >= 0.0:
4066
4067 if len(daughter_offshellnesses)>0:
4068 max_mass = max(daughter_offshellnesses)
4069
4070 offshellness = max(2.0*(max_mass/resonance_mass)-1.0,
4071 options['offshellness'])
4072
4073 max_mass = max([(1.0+options['offshellness'])*mass for mass in \
4074 all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
4075
4076
4077
4078 target = max(min_energy*1.2,max_mass*2.0)
4079 if target > options['energy']:
4080 logger.warning("The user-defined energy %f seems"%options['energy']+
4081 " insufficient to reach the minimum propagator invariant mass "+
4082 "%f required for the chosen offshellness %f."%(max_mass,
4083 options['offshellness']) + " Energy reset to %f."%target)
4084 options['energy'] = target
4085
4086 else:
4087 if len(daughter_offshellnesses) > 0:
4088 min_mass = min(daughter_offshellnesses)
4089
4090 offshellness = min(0.25*(min_mass/resonance_mass)-1.0,
4091 options['offshellness'])
4092
4093
4094
4095 if (1.0+offshellness)*resonance_mass < daughter_masses*1.2:
4096 msg = 'The resonance %s cannot accommodate'%str_res+\
4097 ' an offshellness of %f because the daughter'%options['offshellness']+\
4098 ' masses sum to %f.'%daughter_masses
4099 if options['offshellness']<min_offshellness:
4100 msg += ' Try again with an offshellness'+\
4101 ' smaller in absolute value (at least %f).'%min_offshellness
4102 else:
4103 msg += ' Try again with a smaller offshellness (in absolute value).'
4104 raise InvalidCmd(msg)
4105
4106 min_mass = min([(1.0+options['offshellness'])*mass for mass in \
4107 all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
4108
4109
4110 if 2.0*min_mass < options['energy']:
4111 new_energy = max(min_energy*1.2, 2.0*min_mass)
4112 logger.warning("The user-defined energy %f seems"%options['energy']+
4113 " too large to stay below the maximum propagator invariant mass "+
4114 "%f allowed for the chosen offshellness %f."%(min_mass,
4115 options['offshellness']) + " Energy reset to %f."%new_energy)
4116 options['energy'] = new_energy
4117
4118 if options['offshellness'] < 0.0 and options['energy'] >= min_mass:
4119 logger.debug("The target energy is not compatible with the mass"+
4120 " of the external states for this process (%f). It is "%min_mass+
4121 "unlikely that a valid kinematic configuration will be found.")
4122
4123 if options['offshellness']<0.0 and offshellness<options['offshellness'] or \
4124 options['offshellness']>0.0 and offshellness>options['offshellness']:
4125 logger.debug("Offshellness increased to %f"%offshellness+
4126 " so as to try to find a kinematical configuration with"+
4127 " offshellness at least equal to %f"%options['offshellness']+
4128 " for all resonances.")
4129
4130 start_energy = options['energy']
4131 while N_trials<max_trial:
4132 N_trials += 1
4133 if N_trials%nstep_for_energy_increase==0:
4134 if allow_energy_increase > 0.0:
4135 old_offshellness = offshellness
4136 if offshellness > 0.0:
4137 options['energy'] *= energy_increase
4138 offshellness *= energy_increase
4139 else:
4140 options['energy'] = max(options['energy']/energy_increase,
4141 min_energy*1.2)
4142 offshellness = max(min_offshellness,
4143 ((offshellness+1.0)/energy_increase)-1.0)
4144 if old_offshellness!=offshellness:
4145 logger.debug('Trying to find a valid kinematic'+\
4146 " configuration for resonance '%s'"%str_res+\
4147 ' with increased offshellness %f'%offshellness)
4148
4149 candidate = get_PSpoint_for_resonance(resonance, offshellness)
4150 pass_offshell_test = True
4151 for i, res in enumerate(other_res_offshell):
4152
4153 if offshellness > 0.0:
4154 if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) <\
4155 ((1.0+options['offshellness'])*all_other_res_masses[i]):
4156 pass_offshell_test = False
4157 break
4158 else:
4159 if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) >\
4160 ((1.0+options['offshellness'])*all_other_res_masses[i]):
4161 pass_offshell_test = False
4162 break
4163 if not pass_offshell_test:
4164 continue
4165
4166 if isolation_cuts:
4167
4168 if not evaluator.pass_isolation_cuts(candidate,
4169 ptcut=0.05*invmass([candidate[0],candidate[1]]), drcut=0.4):
4170 continue
4171 PS_point_found = candidate
4172 break
4173
4174
4175 options['energy'] = start_energy
4176
4177 if PS_point_found is None:
4178 err_msg = 'Could not find a valid PS point in %d'%max_trial+\
4179 ' trials. Try increasing the energy, modifying the offshellness '+\
4180 'or relaxing some constraints.'
4181 if options['offshellness']<0.0:
4182 err_msg +=' Try with a positive offshellness instead (or a '+\
4183 'negative one of smaller absolute value).'
4184 raise InvalidCmd, err_msg
4185 else:
4186
4187
4188 resonance['offshellnesses'] = []
4189 all_other_res_masses = [resonance_mass] + all_other_res_masses
4190 other_res_offshell = [resonance] + other_res_offshell
4191 for i, res in enumerate(other_res_offshell):
4192 if i==0:
4193 res_str = 'self'
4194 else:
4195 res_str = '%s %s'%(model.get_particle(
4196 res['ParticlePDG']).get_name(),
4197 str(list(res['FSMothersNumbers'])))
4198 resonance['offshellnesses'].append((res_str,(
4199 (invmass([PS_point_found[j-1] for j in
4200 res['FSMothersNumbers']])/all_other_res_masses[i])-1.0)))
4201
4202 resonance['PS_point_used'] = PS_point_found
4203
4204 def get_PSpoint_for_resonance(resonance, offshellness = options['offshellness']):
4205 """ Assigns a kinematic configuration to the resonance dictionary
4206 given in argument."""
4207
4208
4209 mass_string = evaluator.full_model.get_particle(
4210 resonance['ParticlePDG']).get('mass')
4211 mass = evaluator.full_model.get('parameter_dict')[mass_string].real
4212
4213
4214 special_mass = (1.0 + offshellness)*mass
4215
4216
4217 prod_proc = base_objects.Process({'legs':base_objects.LegList(
4218 copy.copy(leg) for leg in process.get('legs') if
4219 leg.get('number') not in resonance['FSMothersNumbers'])})
4220
4221
4222
4223 prod_proc.get('legs').append(base_objects.Leg({
4224 'number':max(l.get('number') for l in process.get('legs'))+1,
4225 'state':True,
4226 'id':0}))
4227
4228 decay_proc = base_objects.Process({'legs':base_objects.LegList(
4229 copy.copy(leg) for leg in process.get('legs') if leg.get('number')
4230 in resonance['FSMothersNumbers'] and not leg.get('state')==False)})
4231
4232
4233
4234
4235 decay_proc.get('legs').insert(0,base_objects.Leg({
4236 'number':-1,
4237 'state':False,
4238 'id':0}))
4239 prod_kinematic = evaluator.get_momenta(prod_proc, options=options,
4240 special_mass=special_mass)[0]
4241 decay_kinematic = evaluator.get_momenta(decay_proc, options=options,
4242 special_mass=special_mass)[0]
4243 momenta = glue_momenta(prod_kinematic,decay_kinematic)
4244
4245
4246
4247 ordered_momenta = [(prod_proc.get('legs')[i].get('number'),momenta[i])
4248 for i in range(len(prod_proc.get('legs'))-1)]
4249
4250 ordered_momenta += [(decay_proc.get('legs')[-i].get('number'),
4251 momenta[-i]) for i in range(1,len(decay_proc.get('legs')))]
4252
4253
4254 return [m[1] for m in sorted(ordered_momenta, key = lambda el: el[0])]
4255
4256
4257
4258 @misc.mute_logger()
4259 def get_width(PDG, lambdaCMS, param_card):
4260 """ Returns the width to use for the particle with absolute PDG 'PDG' and
4261 for the lambdaCMS value 'lambdaCMS', using the cache if possible."""
4262
4263
4264
4265 if abs(PDG) in [abs(leg.get('id')) for leg in process.get('legs')]:
4266 return 0.0
4267
4268 particle = evaluator.full_model.get_particle(PDG)
4269
4270
4271
4272 if particle.get('ghost') or particle.get('goldstone'):
4273 return 0.0
4274
4275
4276 if particle.get('width')=='ZERO':
4277 return 0.0
4278
4279 if (PDG,lambdaCMS) in options['cached_widths']:
4280 return options['cached_widths'][(PDG,lambdaCMS)]
4281
4282 if options['recompute_width'] == 'never':
4283 width = evaluator.full_model.\
4284 get('parameter_dict')[particle.get('width')].real
4285 else:
4286
4287 if aloha.complex_mass:
4288 raise MadGraph5Error, "The width for particle with PDG %d and"%PDG+\
4289 " lambdaCMS=%f should have already been "%lambdaCMS+\
4290 "computed during the NWA run."
4291
4292
4293 if options['recompute_width'] in ['always','first_time']:
4294 particle_name = particle.get_name()
4295 with misc.TMP_directory(dir=options['output_path']) as path:
4296 param_card.write(pjoin(path,'tmp.dat'))
4297
4298
4299
4300 command = '%s --output=%s'%(particle_name,pjoin(path,'tmp.dat'))+\
4301 ' --path=%s --body_decay=2'%pjoin(path,'tmp.dat')+\
4302 ' --precision_channel=0.001'
4303
4304 param_card.write(pjoin(options['output_path'],'tmp.dat'))
4305
4306
4307
4308 orig_model = options['cmd']._curr_model
4309 orig_helas_model = options['cmd']._curr_helas_model
4310 options['cmd'].do_compute_widths(command, evaluator.full_model)
4311
4312 options['cmd']._curr_model = orig_model
4313 options['cmd']._curr_helas_model = orig_helas_model
4314
4315
4316 evaluator.full_model.set_parameters_and_couplings(
4317 param_card=param_card)
4318 try:
4319 tmp_param_card = check_param_card.ParamCard(pjoin(path,'tmp.dat'))
4320 except:
4321 raise MadGraph5Error, 'Error occurred during width '+\
4322 'computation with command:\n compute_widths %s'%command
4323 width = tmp_param_card['decay'].get(PDG).value
4324
4325
4326
4327
4328
4329
4330
4331 if options['recompute_width'] in ['never','first_time']:
4332
4333 for lam in options['lambdaCMS']:
4334 options['cached_widths'][(PDG,lam)]=width*(lam/lambdaCMS)
4335 else:
4336 options['cached_widths'][(PDG,lambdaCMS)] = width
4337
4338 return options['cached_widths'][(PDG,lambdaCMS)]
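# Note on the caching strategy above: with recompute_width in
# ['never', 'first_time'] the width obtained at the current lambdaCMS is used
# to fill the cache for every lambda value by linear rescaling,
# width * (lam / lambdaCMS); with 'always' only the current (PDG, lambdaCMS)
# entry is stored. A sketch with made-up numbers for a Z-like particle:
#
#   cached_widths[(23, 1.0 )] = 2.4952          # computed once at lambda = 1.0
#   cached_widths[(23, 1e-2)] = 2.4952 * 1e-2   # filled by the linear scaling
#   cached_widths[(23, 1e-4)] = 2.4952 * 1e-4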
4339
4340 def get_order(diagrams, diagsName):
4341 """Compute the common sum of coupling orders used for this CMS check
4342 in the diagrams specified. When an inconsistency occurs, use diagsName
4343 in the warning message thrown."""
4344
4345 orders = set([])
4346 for diag in diagrams:
4347 diag_orders = diag.calculate_orders()
4348 orders.add(sum((diag_orders[order] if order in diag_orders else 0)
4349 for order in options['expansion_orders']))
4350 if len(orders)>1:
4351 logger.warning(warning_msg%('%s '%diagsName,str(orders)))
4352 return min(list(orders))
4353 else:
4354 return list(orders)[0]
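# Illustrative sketch: assuming options['expansion_orders'] = ['QED', 'QCD']
# (an assumption made only for this example), a diagram whose
# calculate_orders() returns {'QED': 2, 'QCD': 1} contributes 2 + 1 = 3 to the
# 'orders' set; if all Born diagrams agree on that sum,
# get_order(born_diagrams, 'Born') simply returns 3.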
4355
4356 MLoptions = copy.copy(options['MLOptions'])
4357
4358 MLoptions['DoubleCheckHelicityFilter'] = False
4359
4360
4361 for tweak in options['tweak']['custom']:
4362 if tweak.startswith('seed'):
4363 try:
4364 new_seed = int(tweak[4:])
4365 except ValueError:
4366 raise MadGraph5Error, "Seed '%s' is not of the right format 'seed<int>'."%tweak
4367 random.seed(new_seed)
4368
4369 mode = 'CMS' if aloha.complex_mass else 'NWA'
4370 for i, leg in enumerate(process.get('legs')):
4371 leg.set('number', i+1)
4372
4373 logger.info("Running CMS check for process %s (now doing %s scheme)" % \
4374 ( process.nice_string().replace('Process:', 'process'), mode))
4375
4376 proc_dir = None
4377 resonances = None
4378 warning_msg = "All %sdiagrams do not share the same sum of orders "+\
4379 "%s; found %%s."%(','.join(options['expansion_orders']))+\
4380 " This is potentially problematic for the CMS check."
4381 if NLO:
4382
4383
4384
4385 if options['name']=='auto':
4386 proc_name = "%s%s_%s%s__%s__"%(('SAVED' if options['reuse'] else ''),
4387 temp_dir_prefix, '_'.join(process.shell_string().split('_')[1:]),
4388 ('_' if process.get('perturbation_couplings') else '')+
4389 '_'.join(process.get('perturbation_couplings')),mode)
4390 else:
4391 proc_name = "%s%s_%s__%s__"%(('SAVED' if options['reuse'] else ''),
4392 temp_dir_prefix,options['name'], mode)
4393
4394 timing, matrix_element = generate_loop_matrix_element(process,
4395 options['reuse'], output_path=options['output_path'],
4396 cmd = options['cmd'], proc_name=proc_name,
4397 loop_filter=options['loop_filter'])
4398 if matrix_element is None:
4399
4400 return None
4401
4402 reusing = isinstance(matrix_element, base_objects.Process)
4403 proc_dir = pjoin(options['output_path'],proc_name)
4404
4405
4406 infos = evaluator.setup_process(matrix_element, proc_dir,
4407 reusing = reusing, param_card = options['param_card'],
4408 MLOptions=MLoptions)
4409
4410 evaluator.fix_MadLoopParamCard(pjoin(proc_dir,'Cards'),
4411 mp = None, loop_filter = True,MLOptions=MLoptions)
4412
4413
4414 tmp_card_backup = pjoin(proc_dir,'Cards','param_card.dat__TemporaryBackup__')
4415 if os.path.isfile(tmp_card_backup):
4416
4417 logger.info("Last run in process '%s' apparently aborted."%proc_dir+\
4418 " Now reverting 'param_card.dat' to its original value.")
4419 shutil.copy(tmp_card_backup, pjoin(proc_dir, 'Cards','param_card.dat'))
4420 else:
4421
4422 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat'), tmp_card_backup)
4423
4424 tmp_modelfunc_backup = pjoin(proc_dir,'Source','MODEL',
4425 'model_functions.f__TemporaryBackup__')
4426 if os.path.isfile(tmp_modelfunc_backup):
4427
4428 logger.info("Last run in process '%s' apparently aborted."%proc_dir+\
4429 " Now reverting 'model_functions.f' to its original value.")
4430 shutil.copy(tmp_modelfunc_backup, pjoin(proc_dir,'Source','MODEL',
4431 'model_functions.f'))
4432 evaluator.apply_log_tweak(proc_dir, 'recompile')
4433 else:
4434
4435 shutil.copy(pjoin(proc_dir,'Source','MODEL','model_functions.f'),
4436 tmp_modelfunc_backup)
4437
4438
4439 MadLoopInitializer.fix_PSPoint_in_check(pjoin(proc_dir,'SubProcesses'),
4440 read_ps = True, npoints = 1, hel_config = options['helicity'],
4441 split_orders=options['split_orders'])
4442
4443
4444
4445 for dir in misc.glob('P*_*', pjoin(proc_dir,'SubProcesses')):
4446 if not (re.search(r'.*P\d+_\w*$', dir) and os.path.isdir(dir)):
4447 continue
4448 try:
4449 os.remove(pjoin(dir,'check'))
4450 os.remove(pjoin(dir,'check_sa.o'))
4451 except OSError:
4452 pass
4453
4454 with open(os.devnull, 'w') as devnull:
4455 retcode = subprocess.call(['make','check'],
4456 cwd=dir, stdout=devnull, stderr=devnull)
4457 if retcode != 0:
4458 raise MadGraph5Error, "Compilation error with "+\
4459 "'make check' in %s"%dir
4460
4461
4462 pkl_path = pjoin(proc_dir,'resonance_specs.pkl')
4463 if reusing:
4464
4465
4466 if not os.path.isfile(pkl_path):
4467 raise InvalidCmd('The folder %s could'%proc_dir+\
4468 " not be reused because the resonance specification file "+
4469 "'resonance_specs.pkl' is missing.")
4470 else:
4471 proc_name, born_order, loop_order, resonances = \
4472 save_load_object.load_from_file(pkl_path)
4473
4474
4475 for res in resonances:
4476 set_PSpoint(res, force_other_res_offshell=resonances)
4477
4478
4479 if isinstance(opt, list):
4480 opt.append((proc_name, resonances))
4481 else:
4482 resonances = opt
4483 else:
4484 helas_born_diagrams = matrix_element.get_born_diagrams()
4485 if len(helas_born_diagrams)==0:
4486 logger.warning('The CMS check for loop-induced processes is '+\
4487 'not yet available (nor is it very interesting).')
4488 return None
4489 born_order = get_order(helas_born_diagrams,'Born')
4490 loop_order = get_order(matrix_element.get_loop_diagrams(),'loop')
4491
4492
4493 if isinstance(opt, list):
4494 opt.append((process.base_string(),find_resonances(helas_born_diagrams)))
4495 resonances = opt[-1][1]
4496 else:
4497 resonances = opt
4498
4499
4500 save_load_object.save_to_file(pkl_path, (process.base_string(),
4501 born_order, loop_order,resonances))
4502
4503 else:
4504
4505 try:
4506 amplitude = diagram_generation.Amplitude(process)
4507 except InvalidCmd:
4508 logging.info("No diagrams for %s" % \
4509 process.nice_string().replace('Process', 'process'))
4510 return None
4511 if not amplitude.get('diagrams'):
4512
4513 logging.info("No diagrams for %s" % \
4514 process.nice_string().replace('Process', 'process'))
4515 return None
4516
4517 matrix_element = helas_objects.HelasMatrixElement(amplitude,
4518 gen_color=True)
4519 diagrams = matrix_element.get('diagrams')
4520 born_order = get_order(diagrams,'Born')
4521
4522 loop_order = -1
4523
4524 if isinstance(opt, list):
4525 opt.append((process.base_string(),find_resonances(diagrams)))
4526 resonances = opt[-1][1]
4527 else:
4528 resonances= opt
4529
4530 if len(resonances)==0:
4531 logger.info("No resonance found for process %s."\
4532 %process.base_string())
4533 return None
4534
4535
4536 if not options['cached_param_card'][mode][0]:
4537 if NLO:
4538 param_card = check_param_card.ParamCard(
4539 pjoin(proc_dir,'Cards','param_card.dat'))
4540 else:
4541 param_card = check_param_card.ParamCard(
4542 StringIO.StringIO(evaluator.full_model.write_param_card()))
4543 options['cached_param_card'][mode][0] = param_card
4544 name2block, _ = param_card.analyze_param_card()
4545 options['cached_param_card'][mode][1] = name2block
4546
4547 else:
4548 param_card = options['cached_param_card'][mode][0]
4549 name2block = options['cached_param_card'][mode][1]
4550
4551
4552 if loop_order != -1 and (loop_order+born_order)%2 != 0:
4553 raise MadGraph5Error, 'The summed squared matrix element'+\
4554 " order '%d' is not even."%(loop_order+born_order)
4555 result = {'born_order':born_order,
4556 'loop_order': (-1 if loop_order==-1 else (loop_order+born_order)/2),
4557 'resonances_result':[]}
4558
4559
4560 if NLO:
4561 try:
4562 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat'),
4563 pjoin(proc_dir,'Cards','param_card.dat__backUp__'))
4564 except:
4565 pass
4566
4567
4568 had_log_tweaks=False
4569 if NLO:
4570 for tweak in options['tweak']['custom']:
4571 if tweak.startswith('seed'):
4572 continue
4573 try:
4574 logstart, logend = tweak.split('->')
4575 except:
4576 raise MadGraph5Error, "Tweak '%s' not recognized."%tweak
4577 if logstart in ['logp','logm', 'log'] and \
4578 logend in ['logp','logm', 'log']:
4579 if NLO:
4580 evaluator.apply_log_tweak(proc_dir, [logstart, logend])
4581 had_log_tweaks = True
4582 else:
4583 raise MadGraph5Error, "Tweak '%s' not recognized."%tweak
4584 if had_log_tweaks:
4585 evaluator.apply_log_tweak(proc_dir, 'recompile')
4586
4587
4588 if options['resonances']=='all':
4589 resonances_to_run = resonances
4590 elif isinstance(options['resonances'],int):
4591 resonances_to_run = resonances[:options['resonances']]
4592 elif isinstance(options['resonances'],list):
4593 resonances_to_run = []
4594 for res in resonances:
4595 for res_selection in options['resonances']:
4596 if abs(res['ParticlePDG'])==res_selection[0] and \
4597 res['FSMothersNumbers']==set(res_selection[1]):
4598 resonances_to_run.append(res)
4599 break
4600 else:
4601 raise InvalidCmd("Resonance selection '%s' not recognized"%\
4602 str(options['resonances']))
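# Summary of the accepted formats for options['resonances'], as handled above:
#   'all'            -> run the check on every resonance found
#   2                -> keep only the first two resonances found
#   [(23, (3, 4))]   -> keep resonances with |PDG| == 23 whose final-state
#                       mother leg numbers are exactly {3, 4} (an illustrative
#                       selection; any (pdg, leg-number collection) pairs work)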
4603
4604
4605
4606 if NLO and options['show_plot']:
4607 widgets = ['ME evaluations:', pbar.Percentage(), ' ',
4608 pbar.Bar(),' ', pbar.ETA(), ' ']
4609 progress_bar = pbar.ProgressBar(widgets=widgets,
4610 maxval=len(options['lambdaCMS'])*len(resonances_to_run), fd=sys.stdout)
4611 progress_bar.update(0)
4612
4613 sys.stdout.flush()
4614 else:
4615 progress_bar = None
4616
4617 for resNumber, res in enumerate(resonances_to_run):
4618
4619
4620 result['resonances_result'].append({'resonance':res,'born':[]})
4621 if NLO:
4622 result['resonances_result'][-1]['finite'] = []
4623
4624 for lambdaNumber, lambdaCMS in enumerate(options['lambdaCMS']):
4625
4626
4627 new_param_card = check_param_card.ParamCard(param_card)
4628
4629 for param, replacement in options['expansion_parameters'].items():
4630
4631
4632 orig_param = param.replace('__tmpprefix__','')
4633 if orig_param not in name2block:
4634
4635
4636
4637 continue
4638 for block, lhaid in name2block[orig_param]:
4639 orig_value = float(param_card[block].get(lhaid).value)
4640 new_value = eval(replacement,
4641 {param:orig_value,'lambdacms':lambdaCMS})
4642 new_param_card[block].get(lhaid).value=new_value
4643
4644
4645
4646
4647
4648
4649
4650
4651 evaluator.full_model.set_parameters_and_couplings(
4652 param_card=new_param_card)
4653
4654 for decay in new_param_card['decay'].keys():
4655 if mode=='CMS':
4656 new_width = get_width(abs(decay[0]), lambdaCMS,
4657 new_param_card)
4658 else:
4659 new_width = 0.0
4660 new_param_card['decay'].get(decay).value= new_width
4661
4662
4663 evaluator.full_model.set_parameters_and_couplings(
4664 param_card=new_param_card)
4665 if NLO:
4666 new_param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4667
4668
4669 if lambdaCMS==1.0 and mode=='CMS' and \
4670 options['recompute_width'] in ['always','first_time']:
4671 new_param_card.write(pjoin(proc_dir,
4672 'Cards','param_card.dat_recomputed_widths'))
4673
4674
4675
4676 if mode=='NWA' and (options['recompute_width']=='always' or (
4677 options['recompute_width']=='first_time' and lambdaCMS==1.0)):
4678
4679 tmp_param_card = check_param_card.ParamCard(new_param_card)
4680
4681
4682 for decay in new_param_card['decay'].keys():
4683 particle_name = evaluator.full_model.get_particle(\
4684 abs(decay[0])).get_name()
4685 new_width = get_width(abs(decay[0]),lambdaCMS,new_param_card)
4686 tmp_param_card['decay'].get(decay).value = new_width
4687 if not options['has_FRdecay'] and new_width != 0.0 and \
4688 (abs(decay[0]),lambdaCMS) not in options['cached_widths']:
4689 logger.info('Numerically computed width of particle'+\
4690 ' %s for lambda=%.4g : %-9.6gGeV'%
4691 (particle_name,lambdaCMS,new_width))
4692
4693
4694
4695 if lambdaCMS==1.0 and NLO:
4696 tmp_param_card.write(pjoin(proc_dir,
4697 'Cards','param_card.dat_recomputed_widths'))
4698
4699
4700 for param, replacement in options['tweak']['params'].items():
4701
4702
4703 orig_param = param.replace('__tmpprefix__','')
4704
4705 if orig_param.lower() == 'allwidths':
4706
4707 for decay in new_param_card['decay'].keys():
4708 orig_value = float(new_param_card['decay'].get(decay).value)
4709 new_value = eval(replacement,
4710 {param:orig_value,'lambdacms':lambdaCMS})
4711 new_param_card['decay'].get(decay).value = new_value
4712 continue
4713 if orig_param not in name2block:
4714
4715
4716 continue
4717 for block, lhaid in name2block[orig_param]:
4718 orig_value = float(new_param_card[block].get(lhaid).value)
4719 new_value = eval(replacement,
4720 {param:orig_value,'lambdacms':lambdaCMS})
4721 new_param_card[block].get(lhaid).value=new_value
4722
4723 if options['tweak']['params']:
4724
4725 evaluator.full_model.set_parameters_and_couplings(
4726 param_card=new_param_card)
4727 if NLO:
4728 new_param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4729
4730
4731 if NLO:
4732 ME_res = LoopMatrixElementEvaluator.get_me_value(process, 0,
4733 proc_dir, PSpoint=res['PS_point_used'], verbose=False,
4734 format='dict', skip_compilation=True)
4735
4736
4737
4738
4739 result['resonances_result'][-1]['born'].append(ME_res['born'])
4740 result['resonances_result'][-1]['finite'].append(
4741 ME_res['finite']*ME_res['born']*ME_res['alphaS_over_2pi'])
4742 else:
4743 ME_res = evaluator.evaluate_matrix_element(matrix_element,
4744 p=res['PS_point_used'], auth_skipping=False, output='m2')[0]
4745 result['resonances_result'][-1]['born'].append(ME_res)
4746 if not progress_bar is None:
4747 progress_bar.update(resNumber*len(options['lambdaCMS'])+\
4748 (lambdaNumber+1))
4749
4750 sys.stdout.flush()
4751
4752
4753 log_reversed = False
4754 for tweak in options['tweak']['custom']:
4755 if tweak.startswith('log') and had_log_tweaks:
4756 if log_reversed:
4757 continue
4758 if NLO:
4759 evaluator.apply_log_tweak(proc_dir, 'default')
4760 evaluator.apply_log_tweak(proc_dir, 'recompile')
4761 log_reversed = True
4762
4763
4764 evaluator.full_model.set_parameters_and_couplings(param_card=param_card)
4765 if NLO:
4766 try:
4767 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat__backUp__'),
4768 pjoin(proc_dir,'Cards','param_card.dat'))
4769 except:
4770 param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4771
4772
4773
4774 try:
4775 os.remove(pjoin(proc_dir,'Cards','param_card.dat__TemporaryBackup__'))
4776 os.remove(pjoin(proc_dir,'Source','MODEL',
4777 'model_functions.f__TemporaryBackup__'))
4778 except:
4779 pass
4780
4781 return (process.nice_string().replace('Process:', '').strip(),result)
4782
4783 def get_value(process, evaluator, p=None, options=None):
4784 """Return the value/momentum for a phase space point"""
4785
4786 for i, leg in enumerate(process.get('legs')):
4787 leg.set('number', i+1)
4788
4789 logger.info("Checking %s in %s gauge" % \
4790 ( process.nice_string().replace('Process:', 'process'),
4791 'unitary' if aloha.unitary_gauge else 'feynman'))
4792
4793 legs = process.get('legs')
4794
4795
4796 try:
4797 if process.get('perturbation_couplings')==[]:
4798 amplitude = diagram_generation.Amplitude(process)
4799 else:
4800 amplitude = loop_diagram_generation.LoopAmplitude(process)
4801 except InvalidCmd:
4802 logging.info("No diagrams for %s" % \
4803 process.nice_string().replace('Process', 'process'))
4804 return None
4805
4806 if not amplitude.get('diagrams'):
4807
4808 logging.info("No diagrams for %s" % \
4809 process.nice_string().replace('Process', 'process'))
4810 return None
4811
4812 if not p:
4813
4814 p, w_rambo = evaluator.get_momenta(process, options)
4815
4816
4817 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
4818 matrix_element = helas_objects.HelasMatrixElement(amplitude,
4819 gen_color = True)
4820 else:
4821 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
4822 gen_color = True, optimized_output = evaluator.loop_optimized_output)
4823
4824 mvalue = evaluator.evaluate_matrix_element(matrix_element, p=p,
4825 output='jamp',options=options)
4826
4827 if mvalue and mvalue['m2']:
4828 return {'process':process.base_string(),'value':mvalue,'p':p}
4829
4831 """Present the results of a comparison in a nice list format for loop
4832 processes. It details the results of each Lorentz transformation performed.
4833 """
4834
4835 process = comparison_results[0]['process']
4836 results = comparison_results[0]['results']
4837
4838
4839 threshold_rotations = 1e-6
4840
4841
4842
4843 threshold_boosts = 1e-3
4844 res_str = "%s" % process.base_string()
4845
4846 transfo_col_size = 17
4847 col_size = 18
4848 transfo_name_header = 'Transformation name'
4849
4850 if len(transfo_name_header) + 1 > transfo_col_size:
4851 transfo_col_size = len(transfo_name_header) + 1
4852
4853 misc.sprint(results)
4854 for transfo_name, value in results:
4855 if len(transfo_name) + 1 > transfo_col_size:
4856 transfo_col_size = len(transfo_name) + 1
4857
4858 res_str += '\n' + fixed_string_length(transfo_name_header, transfo_col_size) + \
4859 fixed_string_length("Value", col_size) + \
4860 fixed_string_length("Relative diff.", col_size) + "Result"
4861
4862 ref_value = results[0]
4863 res_str += '\n' + fixed_string_length(ref_value[0], transfo_col_size) + \
4864 fixed_string_length("%1.10e" % ref_value[1]['m2'], col_size)
4865
4866
4867 all_pass = True
4868 for res in results[1:]:
4869 threshold = threshold_boosts if 'BOOST' in res[0].upper() else \
4870 threshold_rotations
4871 rel_diff = abs((ref_value[1]['m2']-res[1]['m2'])\
4872 /((ref_value[1]['m2']+res[1]['m2'])/2.0))
4873 this_pass = rel_diff <= threshold
4874 if not this_pass:
4875 all_pass = False
4876 res_str += '\n' + fixed_string_length(res[0], transfo_col_size) + \
4877 fixed_string_length("%1.10e" % res[1]['m2'], col_size) + \
4878 fixed_string_length("%1.10e" % rel_diff, col_size) + \
4879 ("Passed" if this_pass else "Failed")
4880 if all_pass:
4881 res_str += '\n' + 'Summary: passed'
4882 else:
4883 res_str += '\n' + 'Summary: failed'
4884
4885 return res_str
4886
4888 """Present the results of a comparison in a nice list format.
4889 If output='fail', return the number of failed processes (used for tests).
4890 """
4891
4892
4893 if comparison_results[0]['process']['perturbation_couplings']!=[]:
4894 return output_lorentz_inv_loop(comparison_results, output)
4895
4896 proc_col_size = 17
4897
4898 threshold=1e-10
4899 process_header = "Process"
4900
4901 if len(process_header) + 1 > proc_col_size:
4902 proc_col_size = len(process_header) + 1
4903
4904 for proc, values in comparison_results:
4905 if len(proc) + 1 > proc_col_size:
4906 proc_col_size = len(proc) + 1
4907
4908 col_size = 18
4909
4910 pass_proc = 0
4911 fail_proc = 0
4912 no_check_proc = 0
4913
4914 failed_proc_list = []
4915 no_check_proc_list = []
4916
4917 res_str = fixed_string_length(process_header, proc_col_size) + \
4918 fixed_string_length("Min element", col_size) + \
4919 fixed_string_length("Max element", col_size) + \
4920 fixed_string_length("Relative diff.", col_size) + \
4921 "Result"
4922
4923 for one_comp in comparison_results:
4924 proc = one_comp['process'].base_string()
4925 data = one_comp['results']
4926
4927 if data == 'pass':
4928 no_check_proc += 1
4929 no_check_proc_list.append(proc)
4930 continue
4931
4932 values = [data[i]['m2'] for i in range(len(data))]
4933
4934 min_val = min(values)
4935 max_val = max(values)
4936 diff = (max_val - min_val) / abs(max_val)
4937
4938 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
4939 fixed_string_length("%1.10e" % min_val, col_size) + \
4940 fixed_string_length("%1.10e" % max_val, col_size) + \
4941 fixed_string_length("%1.10e" % diff, col_size)
4942
4943 if diff < threshold:
4944 pass_proc += 1
4945 proc_succeed = True
4946 res_str += "Passed"
4947 else:
4948 fail_proc += 1
4949 proc_succeed = False
4950 failed_proc_list.append(proc)
4951 res_str += "Failed"
4952
4953
4954
4955
4956
4957 if len(data[0]['jamp'])!=0:
4958 for k in range(len(data[0]['jamp'][0])):
4959 sum = [0] * len(data)
4960
4961 for j in range(len(data[0]['jamp'])):
4962
4963 values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
4964 sum = [sum[i] + values[i] for i in range(len(values))]
4965
4966
4967 min_val = min(sum)
4968 max_val = max(sum)
4969 if not max_val:
4970 continue
4971 diff = (max_val - min_val) / max_val
4972
4973 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
4974 fixed_string_length("%1.10e" % min_val, col_size) + \
4975 fixed_string_length("%1.10e" % max_val, col_size) + \
4976 fixed_string_length("%1.10e" % diff, col_size)
4977
4978 if diff > 1e-10:
4979 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
4980 fail_proc += 1
4981 pass_proc -= 1
4982 failed_proc_list.append(proc)
4983 res_str += tmp_str + "Failed"
4984 elif not proc_succeed:
4985 res_str += tmp_str + "Passed"
4986
4987
4988
4989 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
4990 (pass_proc, pass_proc + fail_proc,
4991 fail_proc, pass_proc + fail_proc)
4992
4993 if fail_proc != 0:
4994 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
4995 if no_check_proc:
4996 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
4997
4998 if output == 'text':
4999 return res_str
5000 else:
5001 return fail_proc
5002
5004 """Present the results of a comparison in a nice list format.
5005 If output='fail', return the number of failed processes (used for tests).
5006 """
5007
5008 proc_col_size = 17
5009
5010
5011
5012 pert_coupl = comparison_results[0]['perturbation_couplings']
5013 comparison_results = comparison_results[1:]
5014
5015 if pert_coupl:
5016 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
5017 else:
5018 process_header = "Process"
5019
5020 if len(process_header) + 1 > proc_col_size:
5021 proc_col_size = len(process_header) + 1
5022
5023 for data in comparison_results:
5024 proc = data['process']
5025 if len(proc) + 1 > proc_col_size:
5026 proc_col_size = len(proc) + 1
5027
5028 pass_proc = 0
5029 fail_proc = 0
5030 no_check_proc = 0
5031
5032 failed_proc_list = []
5033 no_check_proc_list = []
5034
5035 col_size = 18
5036
5037 res_str = fixed_string_length(process_header, proc_col_size) + \
5038 fixed_string_length("Unitary", col_size) + \
5039 fixed_string_length("Feynman", col_size) + \
5040 fixed_string_length("Relative diff.", col_size) + \
5041 "Result"
5042
5043 for one_comp in comparison_results:
5044 proc = one_comp['process']
5045 data = [one_comp['value_unit'], one_comp['value_feynm']]
5046
5047
5048 if data[0] == 'pass':
5049 no_check_proc += 1
5050 no_check_proc_list.append(proc)
5051 continue
5052
5053 values = [data[i]['m2'] for i in range(len(data))]
5054
5055 min_val = min(values)
5056 max_val = max(values)
5057
5058
5059 diff = (max_val - min_val) / abs(max_val)
5060
5061 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
5062 fixed_string_length("%1.10e" % values[0], col_size) + \
5063 fixed_string_length("%1.10e" % values[1], col_size) + \
5064 fixed_string_length("%1.10e" % diff, col_size)
5065
5066 if diff < 1e-8:
5067 pass_proc += 1
5068 proc_succeed = True
5069 res_str += "Passed"
5070 else:
5071 fail_proc += 1
5072 proc_succeed = False
5073 failed_proc_list.append(proc)
5074 res_str += "Failed"
5075
5076
5077
5078
5079
5080 if len(data[0]['jamp'])>0:
5081 for k in range(len(data[0]['jamp'][0])):
5082 sum = [0, 0]
5083
5084 for j in range(len(data[0]['jamp'])):
5085
5086 values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
5087 sum = [sum[i] + values[i] for i in range(len(values))]
5088
5089
5090 min_val = min(sum)
5091 max_val = max(sum)
5092 if not max_val:
5093 continue
5094 diff = (max_val - min_val) / max_val
5095
5096 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , col_size) + \
5097 fixed_string_length("%1.10e" % sum[0], col_size) + \
5098 fixed_string_length("%1.10e" % sum[1], col_size) + \
5099 fixed_string_length("%1.10e" % diff, col_size)
5100
5101 if diff > 1e-10:
5102 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
5103 fail_proc += 1
5104 pass_proc -= 1
5105 failed_proc_list.append(proc)
5106 res_str += tmp_str + "Failed"
5107 elif not proc_succeed:
5108 res_str += tmp_str + "Passed"
5109
5110
5111
5112 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
5113 (pass_proc, pass_proc + fail_proc,
5114 fail_proc, pass_proc + fail_proc)
5115
5116 if fail_proc != 0:
5117 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
5118 if no_check_proc:
5119 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
5120
5121
5122 if output == 'text':
5123 return res_str
5124 else:
5125 return fail_proc
5126
5127 def CMS_save_path(extension, cms_res, used_model, opts, output_path=None):
5128 """Creates a suitable filename for saving these results."""
5129
5130 if opts['name']=='auto' and opts['analyze']!='None':
5131
5132 return '%s.%s'%(os.path.splitext(opts['analyze'].split(',')[0])\
5133 [0],extension)
5134
5135 if opts['name']!='auto':
5136 basename = opts['name']
5137 else:
5138 prefix = 'cms_check_'
5139
5140 if len(cms_res['ordered_processes'])==1:
5141 proc = cms_res['ordered_processes'][0]
5142 replacements = [('=>','gt'),('<=','lt'),('/','_no_'),
5143 (' ',''),('+','p'),('-','m'),
5144 ('~','x'), ('>','_'),('=','eq'),('^2','squared')]
5145
5146 try:
5147 proc=proc[:proc.index('[')]
5148 except ValueError:
5149 pass
5150
5151 for key, value in replacements:
5152 proc = proc.replace(key,value)
5153
5154 basename =prefix+proc+'_%s_'%used_model.get('name')+\
5155 ( ('_'+'_'.join(cms_res['perturbation_orders'])) if \
5156 cms_res['perturbation_orders']!=[] else '')
5157
5158 else:
5159 basename = prefix+datetime.datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss")
5160
5161 suffix = '_%s'%opts['tweak']['name'] if opts['tweak']['name']!='' else ''
5162 if output_path:
5163 return pjoin(output_path,'%s%s.%s'%(basename,suffix,extension))
5164 else:
5165 return '%s%s.%s'%(basename,suffix,extension)
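# Worked example under hypothetical options (opts['name'] == 'auto',
# opts['analyze'] == 'None', no tweak name) for a single process
# 'e+ e- > w+ w-' on the 'sm' model with perturbation_orders ['QED']: the
# replacement table above turns the process string into 'epem_wpwm', so
#   CMS_save_path('pkl', cms_res, used_model, opts)
# returns 'cms_check_epem_wpwm_sm__QED.pkl' (prefixed by output_path when one
# is given).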
5166
5168 """ Outputs nicely the outcome of the complex mass scheme check performed
5169 by varying the width in the offshell region of resonances found for each process.
5170 Output just specifies whether text should be returned or a list of failed
5171 processes. Use 'concise_text' for a concise report of the results."""
5172
5173 pert_orders=result['perturbation_orders']
5174
5175
5176
5177
5178
5179
5180
5181
5182
5183 diff_lambda_power = options['diff_lambda_power']
5184
5185
5186
5187
5188
5189
5190
5191 if 'has_FRdecay' in result:
5192 has_FRdecay = result['has_FRdecay']
5193 else:
5194 has_FRdecay = False
5195
5196 if not pert_orders:
5197 CMS_test_threshold = 1e-3
5198 else:
5199
5200
5201
5202
5203
5204
5205 if not has_FRdecay and ('recompute_width' not in result or \
5206 result['recompute_width'] in ['always','first_time']):
5207 CMS_test_threshold = 2e-2*(1.0e-4/min(result['lambdaCMS']))
5208 else:
5209
5210
5211 CMS_test_threshold = 2e-2*(1.0e-5/min(result['lambdaCMS']))
5212
5213
5214
5215
5216 consideration_threshold = min(CMS_test_threshold/10.0, 0.05)
5217
5218
5219 group_val = 3
5220
5221
5222
5223
5224 diff_zero_threshold = 1e-3
5225
5226
5227 lambda_range = options['lambda_plot_range']
5228
5229
5230
5231
5232
5233
5234
5235
5236
5237
5238 res_str = ''
5239
5240 concise_str = ''
5241 concise_data = '%%(process)-%ds%%(asymptot)-15s%%(cms_check)-25s%%(status)-25s\n'
5242 concise_repl_dict = {'Header':{'process':'Process',
5243 'asymptot':'Asymptote',
5244 'cms_check':'Deviation from asymptote',
5245 'status':'Result'}}
5246
5247
5248
5249
5250
5251 useLatexParticleName = 'built-in'
5252 name2tex = {'e+':r'e^+','w+':r'W^+','a':r'\gamma','g':'g',
5253 'e-':r'e^-','w-':r'W^-','z':'Z','h':'H',
5254 'mu+':r'\mu^+',
5255 'mu-':r'\mu^-',
5256 'ta+':r'\tau^+',
5257 'ta-':r'\tau^-'}
5258 for p in ['e','m','t']:
5259 d = {'e':'e','m':r'\mu','t':r'\tau'}
5260 name2tex['v%s'%p]=r'\nu_{%s}'%d[p]
5261 name2tex['v%s~'%p]=r'\bar{\nu_{%s}}'%d[p]
5262
5263 for p in ['u','d','c','s','b','t']:
5264 name2tex[p]=p
5265 name2tex['%s~'%p]=r'\bar{%s}'%p
5266
5267 def format_particle_name(particle, latex=useLatexParticleName):
5268 p_name = particle
5269 if latex=='model':
5270 try:
5271 texname = model.get_particle(particle).get('texname')
5272 if texname and texname!='none':
5273 p_name = r'$\displaystyle %s$'%texname
5274 except:
5275 pass
5276 elif latex=='built-in':
5277 try:
5278 p_name = r'$\displaystyle %s$'%name2tex[particle]
5279 except:
5280 pass
5281 return p_name
5282
5283 def resonance_str(resonance, latex=useLatexParticleName):
5284 """ Provides a concise string to characterize the resonance """
5285 particle_name = model.get_particle(resonance['ParticlePDG']).get_name()
5286 mothersID=['%d'%n for n in sorted(resonance['FSMothersNumbers'])]
5287 return r"%s [%s]"%(format_particle_name(particle_name,latex=latex),
5288 ','.join(mothersID))
5289
5290 def format_title(process, resonance):
5291 """ Format the plot title given the process and resonance """
5292
5293 process_string = []
5294 for particle in process.split():
5295 if '<=' in particle:
5296 particle = particle.replace('<=',r'$\displaystyle <=$')
5297 if '^2' in particle:
5298 particle = particle.replace('^2',r'$\displaystyle ^2$')
5299 if particle=='$$':
5300 process_string.append(r'\$\$')
5301 continue
5302 if particle=='>':
5303 process_string.append(r'$\displaystyle \rightarrow$')
5304 continue
5305 if particle=='/':
5306 process_string.append(r'$\displaystyle /$')
5307 continue
5308 process_string.append(format_particle_name(particle))
5309
5310 if resonance=='':
5311 return r'CMS check for %s' %(' '.join(process_string))
5312 else:
5313 return r'CMS check for %s ( resonance %s )'\
5314 %(' '.join(process_string),resonance)
5315
5316 def guess_lambdaorder(ME_values_list, lambda_values, expected=None,
5317 proc=None, res=None):
5318 """ Guess the lambda scaling from a list of ME values and return it.
5319 Also compare with the expected result if specified and trigger a
5320 warning if not in agreement."""
5321
5322 bpowers = []
5323 for i, lambdaCMS in enumerate(lambda_values[1:]):
5324 bpowers.append(round(math.log(ME_values_list[0]/ME_values_list[i+1],\
5325 lambda_values[0]/lambdaCMS)))
5326
5327
5328 bpower = sorted([(el, bpowers.count(el)) for el in set(bpowers)],
5329 key = lambda elem: elem[1], reverse=True)[0][0]
5330 if not expected:
5331 return bpower
5332 if bpower != expected:
5333 logger.warning('The apparent scaling of the squared amplitude '+
5334 'is inconsistent with the expected value '+
5335 '(expected %i vs detected %i). %i will be used.'%(expected,bpower,bpower)+
5336 ' This happened for process %s and resonance %s.'%(proc, res))
5337 return bpower
5338
5339 def check_stability(ME_values, lambda_values, lambda_scaling, values_name):
5340 """ Checks if the values passed in argument are stable and return the
5341 stability check outcome warning if it is not precise enough. """
5342
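# Rescale each value by (lambda_0/lambda_i)^lambda_scaling so that, for an exact
# power-law behaviour, all entries coincide; the spread around their median is
# then a measure of the numerical stability.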
5343 values = sorted([
5344 abs(val*(lambda_values[0]/lambda_values[i])**lambda_scaling) for \
5345 i, val in enumerate(ME_values)])
5346 median = values[len(values)//2]
5347 max_diff = max(abs(values[0]-median),abs(values[-1]-median))
5348 stability = max_diff/median
5349 stab_threshold = 1e-2
5350 if stability >= stab_threshold:
5351 return "== WARNING: Stability check failed for '%s' with stability %.2e.\n"\
5352 %(values_name, stability)
5353 else:
5354 return None
5355
5356 if options['analyze']=='None':
5357 if options['reuse']:
5358 save_path = CMS_save_path('pkl', result, model, options,
5359 output_path=output_path)
5360 buff = "\nThe results of this check have been stored on disk and its "+\
5361 "analysis can be rerun at anytime with the MG5aMC command:\n "+\
5362 " check cms --analyze=%s\n"%save_path
5363 res_str += buff
5364 concise_str += buff
5365 save_load_object.save_to_file(save_path, result)
5366 elif len(result['ordered_processes'])>0:
5367 buff = "\nUse the following synthax if you want to store "+\
5368 "the raw results on disk.\n"+\
5369 " check cms -reuse <proc_def> <options>\n"
5370 res_str += buff
5371 concise_str += buff
5372
5373
5374
5375
5376
5377 checks = []
5378 for process in result['ordered_processes']:
5379 checks.extend([(process,resID) for resID in \
5380 range(len(result[process]['CMS']))])
5381
5382 if options['reuse']:
5383 logFile = open(CMS_save_path(
5384 'log', result, model, options, output_path=output_path),'w')
5385
5386 lambdaCMS_list=result['lambdaCMS']
5387
5388
5389 failed_procs = []
5390
5391
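# Helper repeating its argument 47 times: bar('=') and bar('-') build the banner
# separators, while bar(2) gives the combined numeric width of two such segments.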
5392 bar = lambda char: char*47
5393
5394
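# Report how the particle widths entering the check were obtained.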
5395 if 'widths_computed' in result:
5396 res_str += '\n%s%s%s\n'%(bar('='),' Widths ',bar('='))
5397 if result['recompute_width'] == 'never':
5398 res_str += '| Widths extracted from the param_card.dat'
5399 else:
5400 res_str += '| Widths computed %s'%('analytically' if has_FRdecay
5401 else 'numerically')
5402 if result['recompute_width'] == 'first_time':
5403 res_str += ' for \lambda = 1'
5404 elif result['recompute_width'] == 'always':
5405 res_str += ' for all \lambda values'
5406 res_str += " using mode '--recompute_width=%s'.\n"%result['recompute_width']
5407 for particle_name, width in result['widths_computed']:
5408 res_str += '| %-10s = %-11.6gGeV\n'%('Width(%s)'%particle_name,width)
5409 res_str += '%s%s%s\n'%(bar('='),'='*8,bar('='))
5410
5411
5412
5413
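# Number of smallest-lambda points used for the numerical stability checks.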
5414 nstab_points=group_val
5415
5416 differences_target = {}
5417 for process, resID in checks:
5418
5419
5420 concise_repl_dict[process] = {'process':process,
5421 'asymptot':'N/A',
5422 'cms_check':'N/A',
5423 'status':'N/A'}
5424 proc_res = result[process]
5425 cms_res = proc_res['CMS'][resID]
5426 nwa_res = proc_res['NWA'][resID]
5427 resonance = resonance_str(cms_res['resonance'], latex='none')
5428 cms_born=cms_res['born']
5429 nwa_born=nwa_res['born']
5430
5431 res_str += '\n%s%s%s\n'%(bar('='),'='*8,bar('='))
5432
5433 proc_title = "%s (resonance %s)"%(process,resonance)
5434 centering = (bar(2)+8-len(proc_title))//2
5435 res_str += "%s%s\n"%(' '*centering,proc_title)
5436
5437 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5438
5439
5440 if diff_lambda_power!=1:
5441 res_str += "== WARNING diff_lambda_power is not 1 but = %g\n"%diff_lambda_power
5442 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5443
5444 born_power = guess_lambdaorder(nwa_born,lambdaCMS_list,
5445 expected=proc_res['born_order'], proc=process, res=resonance)
5446 stab_cms_born = check_stability(cms_born[-nstab_points:],
5447 lambdaCMS_list[-nstab_points:], born_power, 'CMS Born')
5448 if stab_cms_born:
5449 res_str += stab_cms_born
5450 stab_nwa_born = check_stability(nwa_born[-nstab_points:],
5451 lambdaCMS_list[-nstab_points:], born_power, 'NWA Born')
5452 if stab_nwa_born:
5453 res_str += stab_nwa_born
5454
5455 res_str += "== Kinematic configuration in GeV (E,px,pypz)\n"
5456 for i, p in enumerate(cms_res['resonance']['PS_point_used']):
5457 res_str += " | p%-2.d = "%(i+1)
5458 for pi in p:
5459 res_str += '%-24.17g'%pi if pi<0.0 else ' %-23.17g'%pi
5460 res_str += "\n"
5461
5462 res_str += "== Offshellnesses of all detected resonances\n"
5463 for res_name, offshellness in cms_res['resonance']['offshellnesses']:
5464 res_str += " | %-15s = %f\n"%(res_name, offshellness)
5465 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5466
5467 if not pert_orders:
5468 res_str += "== Born scaling lambda^n_born. nborn = %d\n"%born_power
5469 else:
5470 cms_finite=cms_res['finite']
5471 nwa_finite=nwa_res['finite']
5472 loop_power = guess_lambdaorder(nwa_finite,lambdaCMS_list,
5473 expected=proc_res['loop_order'], proc=process, res=resonance)
5474 res_str += "== Scaling lambda^n. nborn, nloop = %d, %d\n"\
5475 %(born_power,loop_power)
5476 stab_cms_finite = check_stability(cms_finite[-nstab_points:],
5477 lambdaCMS_list[-nstab_points:], loop_power, 'CMS finite')
5478 if stab_cms_finite:
5479 res_str += stab_cms_finite
5480 stab_nwa_finite = check_stability(nwa_finite[-nstab_points:],
5481 lambdaCMS_list[-nstab_points:], loop_power, 'NWA finite')
5482 if stab_nwa_finite:
5483 res_str += stab_nwa_finite
5484
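# Build the rescaled quantities whose lambda -> 0 behaviour is checked:
# at LO the Born ME divided by lambda^born_power, at NLO the combinations
# (finite_CMS + born_CMS - born_NWA)/(lambda*born_NWA) and
# finite_NWA/(lambda*born_NWA); their difference is further divided by
# lambda^diff_lambda_power.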
5485 CMSData = []
5486 NWAData = []
5487 DiffData = []
5488 for idata, lam in enumerate(lambdaCMS_list):
5489 if not pert_orders:
5490 new_cms=cms_born[idata]/(lam**born_power)
5491 new_nwa=nwa_born[idata]/(lam**born_power)
5492 else:
5493 new_cms=(cms_finite[idata]+cms_born[idata]-nwa_born[idata])/(lam*nwa_born[idata])
5494 new_nwa=nwa_finite[idata]/(lam*nwa_born[idata])
5495 new_diff=(new_cms-new_nwa)/(lam**diff_lambda_power)
5496 CMSData.append(new_cms)
5497 NWAData.append(new_nwa)
5498 DiffData.append(new_diff)
5499
5500
5501
5502
5503
5504
5505 trim_range=int(((1.0-0.6)/2.0)*len(DiffData))
5506 low_diff_median = sorted(DiffData[trim_range:-trim_range])\
5507 [(len(DiffData)-2*trim_range)//2]
5508
5509
5510
5511
5512
5513
5514 current_median = 0
5515
5516 scan_index = 0
5517 reference = abs(sorted(NWAData)[len(NWAData)//2])
5518 if low_diff_median!= 0.0:
5519 if abs(reference/low_diff_median)<diff_zero_threshold:
5520 reference = abs(low_diff_median)
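# Scan from the largest lambda values towards smaller ones until the median of a
# group of group_val consecutive points agrees with the asymptotic median within
# consideration_threshold; this defines where the plateau region starts.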
5521 while True:
5522 scanner = DiffData[scan_index:group_val+scan_index]
5523 current_median = sorted(scanner)[len(scanner)//2]
5524
5525
5526 if abs(current_median-low_diff_median)/reference<\
5527 consideration_threshold:
5528 break
5529 scan_index += 1
5530 if (group_val+scan_index)>=len(DiffData):
5531
5532
5533 logger.warning('The median scanning failed during the CMS check '+
5534 'for process %s. '%proc_title+\
5535 'This means that the difference plot has no stable '+\
5536 'intermediate region and MG5_aMC will arbitrarily consider the '+\
5537 'left half of the values.')
5538 scan_index = -1
5539 break
5540
5541 if scan_index == -1:
5542 cms_check_data_range = len(DiffData)//2
5543 else:
5544 cms_check_data_range = scan_index + group_val
5545
5546 res_str += "== Data range considered (min, max, n_val) = (%.1e, %.1e, %d)\n"\
5547 %(lambdaCMS_list[-1],lambdaCMS_list[scan_index],
5548 len(lambdaCMS_list)-scan_index)
5549
5550 CMScheck_values = DiffData[cms_check_data_range:]
5551
5552
5553
5554
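# Starting from the smallest lambda values, look for a window of used_group_val
# consecutive points that all agree with the asymptotic median within
# consideration_threshold; points at even smaller lambda which oscillate around
# the plateau are then flagged as a possible numerical instability.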
5555 if scan_index >= 0:
5556
5557 scan_index = len(CMScheck_values)
5558 used_group_val = max(3,group_val)
5559 unstability_found = True
5560 while True:
5561 scanner = CMScheck_values[scan_index-used_group_val:scan_index]
5562 maxdiff = max(abs(scan-low_diff_median) for scan in scanner)
5563 if maxdiff/reference<consideration_threshold:
5564 break
5565 if (scan_index-used_group_val)==0:
5566
5567
5568 unstability_found = False
5569 break
5570
5571 scan_index -= 1
5572
5573
5574 if unstability_found:
5575 unstab_check=CMScheck_values[scan_index:]
5576 relative_array = [val > CMScheck_values[scan_index-1] for
5577 val in unstab_check]
5578 upper = relative_array.count(True)
5579 lower = relative_array.count(False)
5580 if not ((lower==0 and upper>=0) or (lower>=0 and upper==0)):
5581 logger.warning(
5582 """For process %s, a numerically unstable region was detected starting from lambda < %.1e.
5583 Look at the plot in this region (and possibly throw more points using the option --lambdaCMS).
5584 If this is indeed a stability issue, then either decrease MLStabThreshold in MadLoop or decrease the
5585 minimum value of lambda to be considered in the CMS check."""\
5586 %(proc_title, lambdaCMS_list[cms_check_data_range+scan_index-1]))
5587
5588
5589
5590
5591 scan_index = 0
5592 max_diff = 0.0
5593 res_str += "== Ref. value used in the ratios (Born NWA) = %s\n"\
5594 %('%.3g'%reference)
5595 res_str += "== Asymptotic difference value detected = %s\n"\
5596 %('%.3g'%low_diff_median)
5597 concise_repl_dict[process]['asymptot'] = '%.3e'%low_diff_median
5598
5599
5600 differences_target[(process,resID)]= low_diff_median
5601
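# The CMS check value is the maximum relative deviation of the running median
# (over groups of group_val points) from the asymptotic difference, normalised
# to the reference value.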
5602 while True:
5603 current_vals = CMScheck_values[scan_index:scan_index+group_val]
5604 max_diff = max(max_diff, abs(low_diff_median-
5605 sorted(current_vals)[len(current_vals)//2])/reference)
5606 if (scan_index+group_val)>=len(CMScheck_values):
5607 break
5608 scan_index += 1
5609
5610
5611 cms_check = (max_diff*100.0, '>' if max_diff>CMS_test_threshold else '<',
5612 CMS_test_threshold*100.0)
5613 res_str += "== CMS check result (threshold) = %.3g%% (%s%.3g%%)\n"%cms_check
5614 concise_repl_dict[process]['cms_check'] = \
5615 "%-10s (%s%.3g%%)"%('%.3g%%'%cms_check[0],cms_check[1],cms_check[2])
5616
5617 if max_diff>CMS_test_threshold:
5618 failed_procs.append((process,resonance))
5619 res_str += "%s %s %s\n"%(bar('='),
5620 'FAILED' if max_diff>CMS_test_threshold else 'PASSED',bar('='))
5621 concise_repl_dict[process]['status'] = 'Failed' if max_diff>CMS_test_threshold \
5622 else 'Passed'
5623
5624 if output=='concise_text':
5625
5626 max_proc_size = max(
5627 [len(process) for process in result['ordered_processes']]+[10])
5628
5629 res_str = concise_str
5630 res_str += '\n'+concise_data%(max_proc_size+4)%concise_repl_dict['Header']
5631 for process in result['ordered_processes']:
5632 res_str += (concise_data%(max_proc_size+4)%concise_repl_dict[process])
5633
5634 if len(checks):
5635 res_str += "Summary: %i/%i passed"%(len(checks)-len(failed_procs),len(checks))+\
5636 ('.\n' if not failed_procs else ', failed checks are for:\n')
5637 else:
5638 return "\nNo CMS check to perform, the process either has no diagram or does not "+\
5639 "not feature any massive s-channel resonance."
5640
5641 for process, resonance in failed_procs:
5642 res_str += "> %s, %s\n"%(process, resonance)
5643
5644 if output=='concise_text':
5645 res_str += '\nMore detailed information on this check available with the command:\n'
5646 res_str += ' MG5_aMC>display checks\n'
5647
5648
5649
5650
5651 if not options['show_plot']:
5652 if options['reuse']:
5653 logFile.write(res_str)
5654 logFile.close()
5655 if output.endswith('text'):
5656 return res_str
5657 else:
5658 return failed_procs
5659
5660 fig_output_file = CMS_save_path('pdf', result, model, options,
5661 output_path=output_path)
5662 base_fig_name = fig_output_file[:-4]
5663 suffix = 1
5664 while os.path.isfile(fig_output_file):
5665 fig_output_file = '%s__%d__.pdf'%(base_fig_name,suffix)
5666 suffix+=1
5667
5668 process_data_plot_dict={}
5669
5670
5671
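# When the --analyze option lists several result files, the additional results are
# loaded here and overlaid on the same plots; an optional label can be given in
# parentheses after each filename.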
5672 all_res = [(result, None)]
5673 for i, add_res in enumerate(options['analyze'].split(',')[1:]):
5674 specs =re.match(r'^(?P<filename>.*)\((?P<title>.*)\)$', add_res)
5675 if specs:
5676 filename = specs.group('filename')
5677 title = specs.group('title')
5678 else:
5679 filename = add_res
5680 title = '#%d'%(i+1)
5681
5682 new_result = save_load_object.load_from_file(filename)
5683 if new_result is None:
5684 raise InvalidCmd('The complex mass scheme check result'+
5685 " file below could not be read.\n %s"%filename)
5686 if len(new_result['ordered_processes'])!=len(result['ordered_processes']) \
5687 or len(new_result['lambdaCMS'])!=len(result['lambdaCMS']):
5688 raise InvalidCmd('The complex mass scheme check result'+
5689 " file below does not seem compatible.\n %s"%filename)
5690 all_res.append((new_result,title))
5691
5692
5693 for process, resID in checks:
5694 data1=[]
5695 data2=[]
5696 info ={}
5697 for res in all_res:
5698 proc_res = res[0][process]
5699 cms_res = proc_res['CMS'][resID]
5700 nwa_res = proc_res['NWA'][resID]
5701 resonance = resonance_str(cms_res['resonance'])
5702 if options['resonances']!=1:
5703 info['title'] = format_title(process, resonance)
5704 else:
5705 info['title'] = format_title(process, '')
5706
5707 cms_born=cms_res['born']
5708 nwa_born=nwa_res['born']
5709 if len(cms_born) != len(lambdaCMS_list) or\
5710 len(nwa_born) != len(lambdaCMS_list):
5711 raise MadGraph5Error('Inconsistent list of results w.r.t. the'+\
5712 ' lambdaCMS values specified for process %s'%process)
5713 if pert_orders:
5714 cms_finite=cms_res['finite']
5715 nwa_finite=nwa_res['finite']
5716 if len(cms_finite) != len(lambdaCMS_list) or\
5717 len(nwa_finite) != len(lambdaCMS_list):
5718 raise MadGraph5Error('Inconsistent list of results w.r.t. the'+\
5719 ' lambdaCMS values specified for process %s'%process)
5720
5721 bpower = guess_lambdaorder(nwa_born,lambdaCMS_list,
5722 expected=proc_res['born_order'], proc=process, res=resonance)
5723
5724 CMSData = []
5725 NWAData = []
5726 DiffData = []
5727 for idata, lam in enumerate(lambdaCMS_list):
5728 if not pert_orders:
5729 new_cms = cms_born[idata]/lam**bpower
5730 new_nwa = nwa_born[idata]/lam**bpower
5731 else:
5732 new_cms=cms_finite[idata]+cms_born[idata]-nwa_born[idata]
5733 new_nwa=nwa_finite[idata]
5734 new_cms /= lam*nwa_born[idata]
5735 new_nwa /= lam*nwa_born[idata]
5736 new_diff=(new_cms-new_nwa)/(lam**diff_lambda_power)
5737 CMSData.append(new_cms)
5738 NWAData.append(new_nwa)
5739 DiffData.append(new_diff)
5740 if res[1] is None:
5741 if not pert_orders:
5742 data1.append([r'$\displaystyle CMS\;=\;\mathcal{M}_{CMS}^{(0)}/\lambda^%d$'%bpower,CMSData])
5743 data1.append([r'$\displaystyle NWA\;=\;\mathcal{M}_{NWA}^{(0)}/\lambda^%d$'%bpower,NWAData])
5744 else:
5745 data1.append([r'$\displaystyle CMS\;=\;(\mathcal{M}^{(1)}_{CMS}+\mathcal{M}_{CMS}^{(0)}-\mathcal{M}^{(0)}_{NWA})/(\lambda\cdot\mathcal{M}^{(0)}_{NWA})$',CMSData])
5746 data1.append([r'$\displaystyle NWA\;=\;\mathcal{M}^{(1)}_{NWA}/(\lambda\cdot\mathcal{M}^{(0)}_{NWA})$',NWAData])
5747 data2.append([r'$\displaystyle\Delta\;=\;(CMS-NWA)/\lambda%s$'\
5748 %('' if diff_lambda_power==1 else r'^{%g}'%diff_lambda_power)
5749 ,DiffData])
5750 data2.append([r'Detected asymptot',[differences_target[(process,resID)]
5751 for i in range(len(lambdaCMS_list))]])
5752 else:
5753 data1.append([r'$\displaystyle CMS$ %s'%res[1].replace('_',' ').replace('#','\#'), CMSData])
5754 data1.append([r'$\displaystyle NWA$ %s'%res[1].replace('_',' ').replace('#','\#'), NWAData])
5755 data2.append([r'$\displaystyle\Delta$ %s'%res[1].replace('_',' ').replace('#','\#'), DiffData])
5756
5757 process_data_plot_dict[(process,resID)]=(data1,data2, info)
5758
5759
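# Render the plots: for each process/resonance the upper inset shows the rescaled
# CMS and NWA results and the lower inset their normalised difference; all pages
# are collected in a single PDF file.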
5760 try:
5761 import matplotlib.pyplot as plt
5762 from matplotlib.backends.backend_pdf import PdfPages
5763 logger.info('Rendering plots... (this can take some time because of the latex labels)')
5764
5765 res_str += \
5766 """\n-----------------------------------------------------------------------------------------------
5767 | In the plots, the Complex Mass Scheme check is successful if the normalized difference |
5768 | between the CMS and NWA result (lower inset) tends to a constant when \lambda goes to zero. |
5769 -----------------------------------------------------------------------------------------------\n"""
5770
5771
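# Translate the requested lambda plotting range into index bounds on
# lambdaCMS_list, which is ordered from the largest to the smallest lambda.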
5772 if lambda_range[1]>0:
5773 min_lambda_index = -1
5774 for i, lam in enumerate(lambdaCMS_list):
5775 if lam<=lambda_range[1]:
5776 min_lambda_index = i
5777 break
5778 else:
5779 min_lambda_index = 0
5780 if lambda_range[0]>0:
5781 max_lambda_index = -1
5782 for i, lam in enumerate(lambdaCMS_list):
5783 if lam<=lambda_range[0]:
5784 max_lambda_index=i-1
5785 break
5786 else:
5787 max_lambda_index=len(lambdaCMS_list)-1
5788
5789 if max_lambda_index==-1 or min_lambda_index==-1 or \
5790 min_lambda_index==max_lambda_index:
5791 raise InvalidCmd('Invalid lambda plotting range: (%.1e,%.1e)'%\
5792 (lambda_range[0],lambda_range[1]))
5793
5794 if lambda_range[0]>0.0 or lambda_range[1]>0.0:
5795 lambdaCMS_list = lambdaCMS_list[min_lambda_index:max_lambda_index+1]
5796
5797 plt.rc('text', usetex=True)
5798 plt.rc('font', family='serif')
5799 pp=PdfPages(fig_output_file)
5800 if len(checks)==0 or len(process_data_plot_dict[checks[0]][1])<=7:
5801 colorlist=['b','r','g','k','c','m','y']
5802 else:
5803 import matplotlib.colors as colors
5804 import matplotlib.cm as mplcm
5806
5807
5808 cm = plt.get_cmap('gist_rainbow')
5809 cNorm = colors.Normalize(vmin=0, vmax=(len(data2)-1))
5810 scalarMap = mplcm.ScalarMappable(norm=cNorm, cmap=cm)
5811
5812 colorlist = [scalarMap.to_rgba(i*0.9) for i in range(len(data2))]
5813
5814
5815
5816
5817
5818
5819 legend_size = 10
5820 for iproc, (process, resID) in enumerate(checks):
5821 data1,data2, info=process_data_plot_dict[(process,resID)]
5822
5823 if lambda_range[0]>0.0 or lambda_range[1]>0.0:
5824 for i in range(len(data1)):
5825 data1[i][1]=data1[i][1][min_lambda_index:max_lambda_index+1]
5826 for i in range(len(data2)):
5827 data2[i][1]=data2[i][1][min_lambda_index:max_lambda_index+1]
5828 plt.figure(iproc+1)
5829 plt.subplot(211)
5830 minvalue=1e+99
5831 maxvalue=-1e+99
5832 for i, d1 in enumerate(data1):
5833
5834 color=colorlist[i//2]
5835 data_plot=d1[1]
5836 minvalue=min(min(data_plot),minvalue)
5837 maxvalue=max(max(data_plot),maxvalue)
5838 plt.plot(lambdaCMS_list, data_plot, color=color, marker='', \
5839 linestyle=('-' if i%2==0 else '--'),
5840 label=(d1[0] if (i%2==0 or i==1) else '_nolegend_'))
5841 ymin = minvalue-(maxvalue-minvalue)/5.
5842 ymax = maxvalue+(maxvalue-minvalue)/5.
5843
5844 plt.yscale('linear')
5845 plt.xscale('log')
5846 plt.title(info['title'],fontsize=12,y=1.08)
5847 plt.ylabel(r'$\displaystyle \mathcal{M}$')
5848
5849 if ymax*len(data1)-sum(max(d1[1][-len(d1[1])//2:]) \
5850 for d1 in data1) > 0.5*(ymax-ymin)*len(data1):
5851 plt.legend(prop={'size':legend_size},loc='upper left', frameon=False)
5852 else:
5853 plt.legend(prop={'size':legend_size},loc='lower left', frameon=False)
5854
5855 plt.axis([min(lambdaCMS_list),max(lambdaCMS_list), ymin, ymax])
5856
5857 plt.subplot(212)
5858 minvalue=1e+99
5859 maxvalue=-1e+99
5860
5861 try:
5862 asymptot_index = [d2[0] for d2 in data2].index('Detected asymptot')
5863 plt.plot(lambdaCMS_list, data2[asymptot_index][1],
5864 color='0.75', marker='', linestyle='-', label='')
5865 except ValueError:
5866 pass
5867
5868 color_ID = -1
5869 for d2 in data2:
5870
5871 if d2[0]=='Detected asymptot':
5872 continue
5873 color_ID += 1
5874 color=colorlist[color_ID]
5875 data_plot=d2[1]
5876 minvalue=min(min(data_plot),minvalue)
5877 maxvalue=max(max(data_plot),maxvalue)
5878 plt.plot(lambdaCMS_list, data_plot, color=color, marker='',\
5879 linestyle='-', label=d2[0])
5880 ymin = minvalue-(maxvalue-minvalue)/5.
5881 ymax = maxvalue+(maxvalue-minvalue)/5.
5882
5883 plt.yscale('linear')
5884 plt.xscale('log')
5885 plt.ylabel(r'$\displaystyle \Delta$')
5886 plt.xlabel(r'$\displaystyle \lambda$')
5887
5888
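# Estimate whether the difference curves flatten out on the small-lambda (left)
# side of the plot and place the legend in the less crowded corner accordingly.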
5889 sd = [sorted(d2[1][-len(d2[1])//2:]) for d2 in data2]
5890 left_stability = sum(abs(s[0]-s[-1]) for s in sd)
5891 sd = [sorted(d2[1][:-len(d2[1])//2]) for d2 in data2]
5892 right_stability = sum(abs(s[0]-s[-1]) for s in sd)
5893 left_stable = False if right_stability==0.0 else \
5894 (left_stability/right_stability)<0.1
5895
5896 if left_stable:
5897 if ymax*len(data2)-sum(max(d2[1][-len(d2[1])//2:]) \
5898 for d2 in data2) > 0.5*(ymax-ymin)*len(data2):
5899 plt.legend(prop={'size':legend_size},loc='upper left', frameon=False)
5900 else:
5901 plt.legend(prop={'size':legend_size},loc='lower left', frameon=False)
5902 else:
5903 if ymax*len(data2)-sum(max(d2[1][:-len(d2[1])//2]) \
5904 for d2 in data2) > 0.5*(ymax-ymin)*len(data2):
5905 plt.legend(prop={'size':legend_size},loc='upper right', frameon=False)
5906 else:
5907 plt.legend(prop={'size':legend_size},loc='lower right', frameon=False)
5908
5909 plt.axis([min(lambdaCMS_list),max(lambdaCMS_list),\
5910 minvalue-(maxvalue-minvalue)/5., maxvalue+(maxvalue-minvalue)/5.])
5911
5912 plt.savefig(pp,format='pdf')
5913
5914 pp.close()
5915
5916 if len(checks)>0:
5917 logger.info('Complex Mass Scheme check plot output to file %s. '%fig_output_file)
5918
5919 if sys.platform.startswith('linux'):
5920 misc.call(["xdg-open", fig_output_file])
5921 elif sys.platform.startswith('darwin'):
5922 misc.call(["open", fig_output_file])
5923
5924 plt.close("all")
5925
5926 except Exception as e:
5927 if isinstance(e, ImportError):
5928 res_str += "\n= Install matplotlib to get a "+\
5929 "graphical display of the results of the cms check."
5930 else:
5931 general_error = "\n= Could not produce the cms check plot because of "+\
5932 "the following error: %s"%str(e)
5933 try:
5934 import Tkinter
5935 if isinstance(e, Tkinter.TclError):
5936 res_str += "\n= Plots are not generated because your system"+\
5937 " does not support graphical display."
5938 else:
5939 res_str += general_error
5940 except:
5941 res_str += general_error
5942
5943 if options['reuse']:
5944 logFile.write(res_str)
5945 logFile.close()
5946
5947 if output.endswith('text'):
5948 return res_str
5949 else:
5950 return failed_procs
5951