
Source Code for Module madgraph.loop.loop_exporters

   1  ################################################################################ 
   2  # 
   3  # Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors 
   4  # 
   5  # This file is a part of the MadGraph5_aMC@NLO project, an application which  
   6  # automatically generates Feynman diagrams and matrix elements for arbitrary 
   7  # high-energy processes in the Standard Model and beyond. 
   8  # 
   9  # It is subject to the MadGraph5_aMC@NLO license which should accompany this  
  10  # distribution. 
  11  # 
  12  # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch 
  13  # 
  14  ################################################################################ 
  15  """Methods and classes to export matrix elements to v4 format.""" 
  16   
  17  from __future__ import absolute_import 
  18  import copy 
  19  import fractions 
  20  import glob 
  21  import logging 
  22  import os 
  23  import stat 
  24  import sys 
  25  import re 
  26  import shutil 
  27  import subprocess 
  28  import itertools 
  29  import time 
  30  import datetime 
  31   
  32   
  33  import aloha 
  34   
  35  import madgraph.core.base_objects as base_objects 
  36  import madgraph.core.color_algebra as color 
  37  import madgraph.core.helas_objects as helas_objects 
  38  import madgraph.loop.loop_helas_objects as loop_helas_objects 
  39  import madgraph.iolibs.drawing_eps as draw 
  40  import madgraph.iolibs.files as files 
  41  import madgraph.iolibs.group_subprocs as group_subprocs 
  42  import madgraph.various.banner as banner_mod 
  43  import madgraph.various.misc as misc 
  44  import madgraph.various.q_polynomial as q_polynomial 
  45  import madgraph.iolibs.file_writers as writers 
  46  import madgraph.iolibs.gen_infohtml as gen_infohtml 
  47  import madgraph.iolibs.template_files as template_files 
  48  import madgraph.iolibs.ufo_expression_parsers as parsers 
  49  import madgraph.iolibs.export_v4 as export_v4 
  50  import madgraph.various.diagram_symmetry as diagram_symmetry 
  51  import madgraph.various.process_checks as process_checks 
  52  import madgraph.various.progressbar as pbar 
  53  import madgraph.various.q_polynomial as q_polynomial 
  54  import madgraph.core.color_amp as color_amp 
  55  import madgraph.iolibs.helas_call_writers as helas_call_writers 
  56  import models.check_param_card as check_param_card 
  57  from madgraph.loop.loop_base_objects import LoopDiagram 
  58  from madgraph.loop.MadLoopBannerStyles import MadLoopBannerStyles 
  59  from six.moves import range 
  60  from six.moves import zip 
  61   
  62   
  63   
  64  pjoin = os.path.join 
  65   
  66  import aloha.create_aloha as create_aloha 
  67  import models.write_param_card as param_writer 
  68  from madgraph import MadGraph5Error, MG5DIR, InvalidCmd 
  69  from madgraph.iolibs.files import cp, ln, mv 
  70  pjoin = os.path.join 
  71  _file_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] + '/' 
  72  logger = logging.getLogger('madgraph.loop_exporter') 
  73   
  74  #=============================================================================== 
  75  # LoopExporterFortran 
  76  #=============================================================================== 
  77  class LoopExporterFortran(object):
  78      """ Class to define general helper functions to the different
  79      loop fortran exporters (ME, SA, MEGroup, etc.) which will inherit both
  80      from this class AND from the corresponding ProcessExporterFortran(ME,SA,...).
  81      It plays the same role as ProcessExporterFortran and simply defines here
  82      the loop-specific helper functions necessary for all loop exporters.
  83      Notice that we do not have LoopExporterFortran inheriting from
  84      ProcessExporterFortran but give access to arguments like dir_path and
  85      clean using options. This avoids method resolution order ambiguity."""
  86  
  87      default_opt = dict(export_v4.ProcessExporterFortran.default_opt)
  88      default_opt.update({'clean': False, 'complex_mass':False,
  89                          'export_format':'madloop', 'mp':True,
  90                          'loop_dir':'', 'cuttools_dir':'',
  91                          'fortran_compiler':'gfortran',
  92                          'SubProc_prefix': 'P',
  93                          'output_dependencies': 'external',
  94                          'compute_color_flows': False,
  95                          'mode':''})
  96  
  97      include_names = {'ninja'  : 'mninja.mod',
  98                       'golem'  : 'generic_function_1p.mod',
  99                       'samurai': 'msamurai.mod',
 100                       'collier': 'collier.mod'}
 101  
 102      def __init__(self, dir_path = "", opt=None):
103 """Initiate the LoopExporterFortran with directory information on where 104 to find all the loop-related source files, like CutTools""" 105 106 self.opt = dict(self.default_opt) 107 if opt: 108 self.opt.update(opt) 109 110 self.SubProc_prefix = self.opt['SubProc_prefix'] 111 self.loop_dir = self.opt['loop_dir'] 112 self.cuttools_dir = self.opt['cuttools_dir'] 113 self.fortran_compiler = self.opt['fortran_compiler'] 114 self.dependencies = self.opt['output_dependencies'] 115 self.compute_color_flows = self.opt['compute_color_flows'] 116 117 super(LoopExporterFortran,self).__init__(dir_path, self.opt)
118 119 193
 194      def get_aloha_model(self, model):
195 """ Caches the aloha model created here as an attribute of the loop 196 exporter so that it can later be used in the LoopHelasMatrixElement 197 in the function compute_all_analytic_information for recycling aloha 198 computations across different LoopHelasMatrixElements steered by the 199 same loop exporter. 200 """ 201 if not hasattr(self, 'aloha_model'): 202 self.aloha_model = create_aloha.AbstractALOHAModel(model.get('modelpath')) 203 204 missing_lor = [] 205 for lor in model.get('lorentz'): 206 if not hasattr(self.aloha_model.model.lorentz, lor.name): 207 missing_lor.append(lor) 208 if missing_lor: 209 logger.debug("adding in aloha model %s lorentz struct" % len(missing_lor)) 210 self.aloha_model.add_Lorentz_object(missing_lor) 211 212 return self.aloha_model
213 214 #=========================================================================== 215 # write the multiple-precision header files 216 #===========================================================================
 217      def write_mp_files(self, writer_mprec, writer_mpc):
218 """Write the cts_mprec.h and cts_mpc.h""" 219 220 file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mprec.h')).read() 221 writer_mprec.writelines(file) 222 223 file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mpc.h')).read() 224 file = file.replace('&','') 225 writer_mpc.writelines(file) 226 227 return True
228 229 #=============================================================================== 230 # LoopProcessExporterFortranSA 231 #===============================================================================
 232  class LoopProcessExporterFortranSA(LoopExporterFortran,
 233                                     export_v4.ProcessExporterFortranSA):
234 235 """Class to take care of exporting a set of loop matrix elements in the 236 Fortran format.""" 237 238 template_dir=os.path.join(_file_path,'iolibs/template_files/loop') 239 madloop_makefile_name = 'makefile' 240 241 MadLoop_banner = MadLoopBannerStyles.get_MadLoop_Banner( 242 style='classic2', color='green', 243 top_frame_char = '=', bottom_frame_char = '=', 244 left_frame_char = '{',right_frame_char = '}', 245 print_frame=True, side_margin = 7, up_margin = 1) 246
 247      def __init__(self, *args, **opts):
248 super(LoopProcessExporterFortranSA,self).__init__(*args,**opts) 249 self.unique_id=0 # to allow collier to distinguish the various loop subprocesses 250 self.has_loop_induced = False
251
 252      def copy_template(self, model):
253 """Additional actions needed to setup the Template. 254 """ 255 super(LoopProcessExporterFortranSA, self).copy_template(model) 256 257 self.loop_additional_template_setup()
258
 259      def finalize(self, matrix_element, cmdhistory, MG5options, outputflag):
260 """create the global information for loops""" 261 262 super(LoopProcessExporterFortranSA,self).finalize(matrix_element, 263 cmdhistory, MG5options, outputflag) 264 265 266 MLCard = banner_mod.MadLoopParam(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat')) 267 # For loop-induced processes and *only* when summing over all helicity configurations 268 # (which is the default for standalone usage), COLLIER is faster than Ninja. 269 if self.has_loop_induced: 270 MLCard['MLReductionLib'] = "7|6|1" 271 # Computing the poles with COLLIER also unnecessarily slows down the code 272 # It should only be set to True for checks and it's acceptable to remove them 273 # here because for loop-induced processes they should be zero anyway. 274 # We keep it active for non-loop induced processes because COLLIER is not the 275 # main reduction tool in that case, and the poles wouldn't be zero then 276 MLCard['COLLIERComputeUVpoles'] = False 277 MLCard['COLLIERComputeIRpoles'] = False 278 279 MLCard.write(pjoin(self.dir_path, 'Cards', 'MadLoopParams_default.dat')) 280 MLCard.write(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat'))
281
 282      def write_f2py_makefile(self):
283 return
284
 285      def write_f2py_check_sa(self, matrix_element, output_path):
286 """ Write the general check_sa.py in SubProcesses that calls all processes successively.""" 287 288 # No need to further edit this file for now. 289 file = open(os.path.join(self.template_dir,\ 290 'check_sa_all.py.inc')).read() 291 open(output_path,'w').writelines(file) 292 # Make it executable 293 os.chmod(output_path, os.stat(output_path).st_mode | stat.S_IEXEC)
294 295
 296      def write_f2py_splitter(self):
297 """write a function to call the correct matrix element""" 298 299 template = """ 300 %(python_information)s 301 302 SUBROUTINE INITIALISE(PATH) 303 C ROUTINE FOR F2PY to read the benchmark point. 304 IMPLICIT NONE 305 CHARACTER*512 PATH 306 CF2PY INTENT(IN) :: PATH 307 CALL SETPARA(PATH) !first call to setup the paramaters 308 RETURN 309 END 310 311 subroutine CHANGE_PARA(name, value) 312 implicit none 313 CF2PY intent(in) :: name 314 CF2PY intent(in) :: value 315 316 character*512 name 317 double precision value 318 319 include '../Source/MODEL/input.inc' 320 include '../Source/MODEL/coupl.inc' 321 include '../Source/MODEL/mp_coupl.inc' 322 include '../Source/MODEL/mp_input.inc' 323 324 SELECT CASE (name) 325 %(parameter_setup)s 326 CASE DEFAULT 327 write(*,*) 'no parameter matching', name 328 END SELECT 329 330 return 331 end 332 333 subroutine update_all_coup() 334 implicit none 335 call coup() 336 call printout() 337 return 338 end 339 340 341 SUBROUTINE SET_MADLOOP_PATH(PATH) 342 C Routine to set the path of the folder 'MadLoop5_resources' to MadLoop 343 CHARACTER(512) PATH 344 CF2PY intent(in)::path 345 CALL SETMADLOOPPATH(PATH) 346 END 347 348 subroutine smatrixhel(pdgs, procid, npdg, p, ALPHAS, SCALES2, nhel, ANS, RETURNCODE) 349 IMPLICIT NONE 350 351 CF2PY double precision, intent(in), dimension(0:3,npdg) :: p 352 CF2PY integer, intent(in), dimension(npdg) :: pdgs 353 CF2PY integer, intent(in):: procid 354 CF2PY integer, intent(in) :: npdg 355 CF2PY double precision, intent(out) :: ANS 356 CF2PY integer, intent(out) :: RETURNCODE 357 CF2PY double precision, intent(in) :: ALPHAS 358 CF2PY double precision, intent(in) :: SCALES2 359 360 integer pdgs(*) 361 integer npdg, nhel, RETURNCODE, procid 362 double precision p(*) 363 double precision ANS, ALPHAS, PI,SCALES2 364 1 continue 365 %(smatrixhel)s 366 367 return 368 end 369 370 subroutine get_pdg_order(OUT, ALLPROC) 371 IMPLICIT NONE 372 CF2PY INTEGER, intent(out) :: OUT(%(nb_me)i,%(maxpart)i) 373 CF2PY INTEGER, intent(out) :: ALLPROC(%(nb_me)i) 374 INTEGER OUT(%(nb_me)i,%(maxpart)i), PDGS(%(nb_me)i,%(maxpart)i) 375 INTEGER ALLPROC(%(nb_me)i),PIDs(%(nb_me)i) 376 DATA PDGS/ %(pdgs)s / 377 DATA PIDS/ %(pids)s / 378 OUT=PDGS 379 ALLPROC = PIDS 380 RETURN 381 END 382 383 subroutine get_prefix(PREFIX) 384 IMPLICIT NONE 385 CF2PY CHARACTER*20, intent(out) :: PREFIX(%(nb_me)i) 386 character*20 PREFIX(%(nb_me)i),PREF(%(nb_me)i) 387 DATA PREF / '%(prefix)s'/ 388 PREFIX = PREF 389 RETURN 390 END 391 392 """ 393 394 allids = list(self.prefix_info.keys()) 395 allprefix = [self.prefix_info[key][0] for key in allids] 396 min_nexternal = min([len(ids[0]) for ids in allids]) 397 max_nexternal = max([len(ids[0]) for ids in allids]) 398 399 info = [] 400 for (key,pid), (prefix, tag) in self.prefix_info.items(): 401 info.append('#PY %s : %s # %s %s' % (tag, key, prefix, pid)) 402 403 404 text = [] 405 for n_ext in range(min_nexternal, max_nexternal+1): 406 current_id = [ids[0] for ids in allids if len(ids[0])==n_ext] 407 current_pid = [ids[1] for ids in allids if len(ids[0])==n_ext] 408 if not current_id: 409 continue 410 if min_nexternal != max_nexternal: 411 if n_ext == min_nexternal: 412 text.append(' if (npdg.eq.%i)then' % n_ext) 413 else: 414 text.append(' else if (npdg.eq.%i)then' % n_ext) 415 for ii,pdgs in enumerate(current_id): 416 pid = current_pid[ii] 417 condition = '.and.'.join(['%i.eq.pdgs(%i)' %(pdg, i+1) for i, pdg in enumerate(pdgs)]) 418 if ii==0: 419 text.append( ' if(%s.and.(procid.le.0.or.procid.eq.%d)) then ! 
%i' % (condition, pid, len(pdgs))) 420 else: 421 text.append( ' else if(%s.and.(procid.le.0.or.procid.eq.%d)) then ! %i' % (condition,pid,len(pdgs))) 422 text.append(' call %sget_me(p, ALPHAS, DSQRT(SCALES2), NHEL, ANS, RETURNCODE)' % self.prefix_info[(pdgs,pid)][0]) 423 text.append( ' else if(procid.gt.0) then !') 424 text.append( ' procid = -1' ) 425 text.append( ' goto 1' ) 426 427 text.append(' endif') 428 #close the function 429 if min_nexternal != max_nexternal: 430 text.append('endif') 431 432 params = self.get_model_parameter(self.model) 433 parameter_setup =[] 434 for key, var in params.items(): 435 parameter_setup.append(' CASE ("%s")\n %s = value\n MP__%s = value' 436 % (key, var, var)) 437 438 439 440 formatting = {'python_information':'\n'.join(info), 441 'smatrixhel': '\n'.join(text), 442 'maxpart': max_nexternal, 443 'nb_me': len(allids), 444 'pdgs': ','.join([str(pdg[i]) if i<len(pdg) else '0' 445 for i in range(max_nexternal) \ 446 for (pdg,pid) in allids]), 447 'prefix':'\',\''.join(allprefix), 448 'parameter_setup': '\n'.join(parameter_setup), 449 'pids': ','.join(str(pid) for (pdg,pid) in allids), 450 } 451 452 453 text = template % formatting 454 fsock = writers.FortranWriter(pjoin(self.dir_path, 'SubProcesses', 'all_matrix.f'),'w') 455 fsock.writelines(text) 456 fsock.close()
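          # The dispatcher generated above is driven by self.prefix_info, whose
          # entries map (pdg_tuple, process_id) -> [proc_prefix, process_tag].
          # For a hypothetical loop-induced g g > h process with id 1 one could have
          #
          #     self.prefix_info[((21, 21, 25), 1)] = ['ML5_1_', tag]
          #
          # which the loop above turns into a Fortran test of the form
          #     if(21.eq.pdgs(1).and.21.eq.pdgs(2).and.25.eq.pdgs(3).and. ...) then
          # followed by
          #     call ML5_1_get_me(p, ALPHAS, DSQRT(SCALES2), NHEL, ANS, RETURNCODE)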
457 458 459
 460      def loop_additional_template_setup(self, copy_Source_makefile = True):
461 """ Perform additional actions specific for this class when setting 462 up the template with the copy_template function.""" 463 464 # We must change some files to their version for NLO computations 465 cpfiles= ["Cards/MadLoopParams.dat", 466 "SubProcesses/MadLoopParamReader.f", 467 "SubProcesses/MadLoopParams.inc"] 468 if copy_Source_makefile: 469 cpfiles.append("Source/makefile") 470 471 for file in cpfiles: 472 shutil.copy(os.path.join(self.loop_dir,'StandAlone/', file), 473 os.path.join(self.dir_path, file)) 474 475 cp(pjoin(self.loop_dir,'StandAlone/Cards/MadLoopParams.dat'), 476 pjoin(self.dir_path, 'Cards/MadLoopParams_default.dat')) 477 478 ln(pjoin(self.dir_path, 'Cards','MadLoopParams.dat'), pjoin(self.dir_path,'SubProcesses')) 479 480 # We might need to give a different name to the MadLoop makefile 481 shutil.copy(pjoin(self.loop_dir,'StandAlone','SubProcesses','makefile'), 482 pjoin(self.dir_path, 'SubProcesses',self.madloop_makefile_name)) 483 484 # Write SubProcesses/MadLoop_makefile_definitions with dummy variables 485 # for the non-optimized output 486 link_tir_libs=[] 487 tir_libs=[] 488 489 filePath = pjoin(self.dir_path, 'SubProcesses', 490 'MadLoop_makefile_definitions') 491 calls = self.write_loop_makefile_definitions( 492 writers.MakefileWriter(filePath),link_tir_libs,tir_libs) 493 494 # We need minimal editing of MadLoopCommons.f 495 # For the optimized output, this file will be overwritten once the 496 # availability of COLLIER has been determined. 497 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone', 498 "SubProcesses","MadLoopCommons.inc")).read() 499 writer = writers.FortranWriter(os.path.join(self.dir_path, 500 "SubProcesses","MadLoopCommons.f")) 501 writer.writelines(MadLoopCommon%{ 502 'print_banner_commands':self.MadLoop_banner}, context={ 503 'collier_available':False}) 504 writer.close() 505 506 # Copy the whole MadLoop5_resources directory (empty at this stage) 507 if not os.path.exists(pjoin(self.dir_path,'SubProcesses', 508 'MadLoop5_resources')): 509 cp(pjoin(self.loop_dir,'StandAlone','SubProcesses', 510 'MadLoop5_resources'),pjoin(self.dir_path,'SubProcesses')) 511 512 # Link relevant cards from Cards inside the MadLoop5_resources 513 ln(pjoin(self.dir_path,'SubProcesses','MadLoopParams.dat'), 514 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 515 ln(pjoin(self.dir_path,'Cards','param_card.dat'), 516 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 517 ln(pjoin(self.dir_path,'Cards','ident_card.dat'), 518 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 519 520 # And remove check_sa in the SubProcess folder since now there is a 521 # check_sa tailored to each subprocess. 522 if os.path.isfile(pjoin(self.dir_path,'SubProcesses','check_sa.f')): 523 os.remove(pjoin(self.dir_path,'SubProcesses','check_sa.f')) 524 525 cwd = os.getcwd() 526 dirpath = os.path.join(self.dir_path, 'SubProcesses') 527 try: 528 os.chdir(dirpath) 529 except os.error: 530 logger.error('Could not cd to directory %s' % dirpath) 531 return 0 532 533 # Write the cts_mpc.h and cts_mprec.h files imported from CutTools 534 self.write_mp_files(writers.FortranWriter('cts_mprec.h'),\ 535 writers.FortranWriter('cts_mpc.h')) 536 537 # Return to original PWD 538 os.chdir(cwd) 539 540 # We must link the CutTools to the Library folder of the active Template 541 super(LoopProcessExporterFortranSA, self).link_CutTools(self.dir_path)
 542  
 543      # This function is placed here and not in the optimized exporter,
 544      # because the same makefile.inc should be used in all cases.
 545      def write_loop_makefile_definitions(self, writer, link_tir_libs,
 546                                          tir_libs,tir_include=[]):
547 """ Create the file makefile which links to the TIR libraries.""" 548 549 file = open(os.path.join(self.loop_dir,'StandAlone', 550 'SubProcesses','MadLoop_makefile_definitions.inc')).read() 551 replace_dict={} 552 replace_dict['link_tir_libs']=' '.join(link_tir_libs) 553 replace_dict['tir_libs']=' '.join(tir_libs) 554 replace_dict['dotf']='%.f' 555 replace_dict['prefix']= self.SubProc_prefix 556 replace_dict['doto']='%.o' 557 replace_dict['tir_include']=' '.join(tir_include) 558 file=file%replace_dict 559 if writer: 560 writer.writelines(file) 561 else: 562 return file
563
 564      def convert_model(self, model, wanted_lorentz = [],
 565                        wanted_couplings = []):
566 """ Caches the aloha model created here when writing out the aloha 567 fortran subroutine. 568 """ 569 self.get_aloha_model(model) 570 super(LoopProcessExporterFortranSA, self).convert_model(model, 571 wanted_lorentz = wanted_lorentz, wanted_couplings = wanted_couplings)
572
 573      def get_ME_identifier(self, matrix_element,
 574                group_number = None, group_elem_number = None):
575 """ A function returning a string uniquely identifying the matrix 576 element given in argument so that it can be used as a prefix to all 577 MadLoop5 subroutines and common blocks related to it. This allows 578 to compile several processes into one library as requested by the 579 BLHA (Binoth LesHouches Accord) guidelines. 580 The arguments group_number and proc_id are just for the LoopInduced 581 output with MadEvent.""" 582 583 # When disabling the loop grouping in the LoopInduced MadEvent output, 584 # we have only the group_number set and the proc_id set to None. In this 585 # case we don't print the proc_id. 586 if (not group_number is None) and group_elem_number is None: 587 return 'ML5_%d_%s_'%(matrix_element.get('processes')[0].get('id'), 588 group_number) 589 elif group_number is None or group_elem_number is None: 590 return 'ML5_%d_'%matrix_element.get('processes')[0].get('id') 591 else: 592 return 'ML5_%d_%s_%s_'%(matrix_element.get('processes')[0].get('id'), 593 group_number, group_elem_number)
594
 595      def get_SubProc_folder_name(self, process,
 596                group_number = None, group_elem_number = None):
597 """Returns the name of the SubProcess directory, which can contain 598 the process goup and group element number for the case of loop-induced 599 integration with MadEvent.""" 600 601 # When disabling the loop grouping in the LoopInduced MadEvent output, 602 # we have only the group_number set and the proc_id set to None. In this 603 # case we don't print the proc_id. 604 if not group_number is None and group_elem_number is None: 605 return "%s%d_%s_%s"%(self.SubProc_prefix, process.get('id'), 606 group_number,process.shell_string(print_id=False)) 607 elif group_number is None or group_elem_number is None: 608 return "%s%s" %(self.SubProc_prefix,process.shell_string()) 609 else: 610 return "%s%d_%s_%s_%s"%(self.SubProc_prefix, process.get('id'), 611 group_number, group_elem_number,process.shell_string(print_id=False))
612 613 #=========================================================================== 614 # Set the compiler to be gfortran for the loop processes. 615 #===========================================================================
 616      def compiler_choice(self, compiler=export_v4.default_compiler):
 617          """ Different daughter classes might want different compilers.
 618          Here, the gfortran compiler is used throughout the compilation
 619          (mandatory for CutTools written in f90) """
 620          if isinstance(compiler, str):
 621              fortran_compiler = compiler
 622              compiler = export_v4.default_compiler
 623              compiler['fortran'] = fortran_compiler
 624  
 625          if not compiler['fortran'] is None and not \
 626              any([name in compiler['fortran'] for name in \
 627                                                         ['gfortran','ifort']]):
 628              logger.info('For loop processes, the compiler must be fortran90'+\
 629                          ' compatible, like gfortran.')
 630              compiler['fortran'] = 'gfortran'
 631              self.set_compiler(compiler,True)
 632          else:
 633              self.set_compiler(compiler)
 634  
 635          self.set_cpp_compiler(compiler['cpp'])
636
 637      def turn_to_mp_calls(self, helas_calls_list):
638 # Prepend 'MP_' to all the helas calls in helas_calls_list. 639 # Might look like a brutal unsafe implementation, but it is not as 640 # these calls are built from the properties of the HELAS objects and 641 # whether they are evaluated in double or quad precision is none of 642 # their business but only relevant to the output algorithm. 643 # Also the cast to complex masses DCMPLX(*) must be replaced by 644 # CMPLX(*,KIND=16) 645 MP=re.compile(r"(?P<toSub>^.*CALL\s+)",re.IGNORECASE | re.MULTILINE) 646 647 def replaceWith(match_obj): 648 return match_obj.group('toSub')+'MP_'
649 650 DCMPLX=re.compile(r"DCMPLX\((?P<toSub>([^\)]*))\)",\ 651 re.IGNORECASE | re.MULTILINE) 652 653 for i, helas_call in enumerate(helas_calls_list): 654 new_helas_call=MP.sub(replaceWith,helas_call) 655 helas_calls_list[i]=DCMPLX.sub(r"CMPLX(\g<toSub>,KIND=16)",\ 656 new_helas_call)
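          # Effect of the two substitutions above on a representative call
          # (the HELAS routine name and arguments here are made up):
          #
          #     before:  CALL VVV1L_1(W(1,4),W(1,5),DCMPLX(GC_10),ZERO,ZERO,W(1,8))
          #     after :  CALL MP_VVV1L_1(W(1,4),W(1,5),CMPLX(GC_10,KIND=16),ZERO,ZERO,W(1,8))
          #
          # i.e. the routine is redirected to its quadruple-precision version and
          # the double-precision complex cast is promoted to KIND=16.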
657 661 669
 670      def make(self):
671 """ Compiles the additional dependences for loop (such as CutTools).""" 672 super(LoopProcessExporterFortranSA, self).make() 673 674 # make CutTools (only necessary with MG option output_dependencies='internal') 675 libdir = os.path.join(self.dir_path,'lib') 676 sourcedir = os.path.join(self.dir_path,'Source') 677 if self.dependencies=='internal': 678 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 679 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 680 if os.path.exists(pjoin(sourcedir,'CutTools')): 681 logger.info('Compiling CutTools (can take a couple of minutes) ...') 682 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 683 logger.info(' ...done.') 684 else: 685 raise MadGraph5Error('Could not compile CutTools because its'+\ 686 ' source directory could not be found in the SOURCE folder.') 687 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 688 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 689 raise MadGraph5Error('CutTools compilation failed.') 690 691 # Verify compatibility between current compiler and the one which was 692 # used when last compiling CutTools (if specified). 693 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 694 libdir, 'libcts.a')))),'compiler_version.log') 695 if os.path.exists(compiler_log_path): 696 compiler_version_used = open(compiler_log_path,'r').read() 697 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 698 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 699 if os.path.exists(pjoin(sourcedir,'CutTools')): 700 logger.info('CutTools was compiled with a different fortran'+\ 701 ' compiler. Re-compiling it now...') 702 misc.compile(['cleanCT'], cwd = sourcedir) 703 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 704 logger.info(' ...done.') 705 else: 706 raise MadGraph5Error("CutTools installation in %s"\ 707 %os.path.realpath(pjoin(libdir, 'libcts.a'))+\ 708 " seems to have been compiled with a different compiler than"+\ 709 " the one specified in MG5_aMC. Please recompile CutTools.")
710
 711      def cat_coeff(self, ff_number, frac, is_imaginary, Nc_power, Nc_value=3):
712 """Concatenate the coefficient information to reduce it to 713 (fraction, is_imaginary) """ 714 715 total_coeff = ff_number * frac * fractions.Fraction(Nc_value) ** Nc_power 716 717 return (total_coeff, is_imaginary)
718
 719      def get_amp_to_jamp_map(self, col_amps, n_amps):
 720          """ Returns a list with element 'i' being a list of tuples corresponding
 721          to all appearances of amplitude number 'i' in the jamp number 'j'
 722          with coeff 'coeff_j'. The format of each tuple describing an appearance
 723          is (j, coeff_j), where coeff_j is of the form (Fraction, is_imag)."""
 724  
 725          if(isinstance(col_amps,list)):
 726              if(col_amps and isinstance(col_amps[0],list)):
 727                  color_amplitudes=col_amps
 728              else:
 729                  raise MadGraph5Error("Incorrect col_amps argument passed to get_amp_to_jamp_map")
 730          else:
 731              raise MadGraph5Error("Incorrect col_amps argument passed to get_amp_to_jamp_map")
 732  
 733          # To store the result
 734          res_list = [[] for i in range(n_amps)]
 735          for i, coeff_list in enumerate(color_amplitudes):
 736              for (coefficient, amp_number) in coeff_list:
 737                  res_list[amp_number-1].append((i,self.cat_coeff(\
 738                      coefficient[0],coefficient[1],coefficient[2],coefficient[3])))
 739  
 740          return res_list
741
 742      def get_color_matrix(self, matrix_element):
743 """Return the color matrix definition lines. This color matrix is of size 744 NLOOPAMPSxNBORNAMPS and allows for squaring individually each Loop and Born 745 amplitude.""" 746 747 logger.info('Computing diagram color coefficients') 748 749 # The two lists have a list of tuples at element 'i' which correspond 750 # to all apparitions of loop amplitude number 'i' in the jampl number 'j' 751 # with coeff 'coeffj'. The format of each tuple describing an apparition 752 # is (j, coeffj). 753 ampl_to_jampl=self.get_amp_to_jamp_map(\ 754 matrix_element.get_loop_color_amplitudes(), 755 matrix_element.get_number_of_loop_amplitudes()) 756 if matrix_element.get('processes')[0].get('has_born'): 757 ampb_to_jampb=self.get_amp_to_jamp_map(\ 758 matrix_element.get_born_color_amplitudes(), 759 matrix_element.get_number_of_born_amplitudes()) 760 else: 761 ampb_to_jampb=ampl_to_jampl 762 # Below is the original color matrix multiplying the JAMPS 763 if matrix_element.get('color_matrix'): 764 ColorMatrixDenom = \ 765 matrix_element.get('color_matrix').get_line_denominators() 766 ColorMatrixNum = [ matrix_element.get('color_matrix').\ 767 get_line_numerators(index, denominator) for 768 (index, denominator) in enumerate(ColorMatrixDenom) ] 769 else: 770 ColorMatrixDenom= [1] 771 ColorMatrixNum = [[1]] 772 773 # Below is the final color matrix output 774 ColorMatrixNumOutput=[] 775 ColorMatrixDenomOutput=[] 776 777 # Now we construct the color factors between each born and loop amplitude 778 # by scanning their contributions to the different jamps. 779 start = time.time() 780 progress_bar = None 781 time_info = False 782 for i, jampl_list in enumerate(ampl_to_jampl): 783 # This can be pretty long for processes with many color flows. 784 # So, if necessary (i.e. for more than 15s), we tell the user the 785 # estimated time for the processing. 786 if i==5: 787 elapsed_time = time.time()-start 788 t = len(ampl_to_jampl)*(elapsed_time/5.0) 789 if t > 10.0: 790 time_info = True 791 logger.info('The color factors computation will take '+\ 792 ' about %s to run. '%str(datetime.timedelta(seconds=int(t)))+\ 793 'Started on %s.'%datetime.datetime.now().strftime(\ 794 "%d-%m-%Y %H:%M")) 795 if logger.getEffectiveLevel()<logging.WARNING: 796 widgets = ['Color computation:', pbar.Percentage(), ' ', 797 pbar.Bar(),' ', pbar.ETA(), ' '] 798 progress_bar = pbar.ProgressBar(widgets=widgets, 799 maxval=len(ampl_to_jampl), fd=sys.stdout) 800 801 if not progress_bar is None: 802 progress_bar.update(i+1) 803 # Flush to force the printout of the progress_bar to be updated 804 sys.stdout.flush() 805 806 line_num=[] 807 line_denom=[] 808 809 # Treat the special case where this specific amplitude contributes to no 810 # color flow at all. So it is zero because of color but not even due to 811 # an accidental cancellation among color flows, but simply because of its 812 # projection to each individual color flow is zero. In such case, the 813 # corresponding jampl_list is empty and all color coefficients must then 814 # be zero. This happens for example in the Higgs Effective Theory model 815 # for the bubble made of a 4-gluon vertex and the effective ggH vertex. 
816 if len(jampl_list)==0: 817 line_num=[0]*len(ampb_to_jampb) 818 line_denom=[1]*len(ampb_to_jampb) 819 ColorMatrixNumOutput.append(line_num) 820 ColorMatrixDenomOutput.append(line_denom) 821 continue 822 823 for jampb_list in ampb_to_jampb: 824 real_num=0 825 imag_num=0 826 common_denom=color_amp.ColorMatrix.lcmm(*[abs(ColorMatrixDenom[jampl]* 827 ampl_coeff[0].denominator*ampb_coeff[0].denominator) for 828 ((jampl, ampl_coeff),(jampb,ampb_coeff)) in 829 itertools.product(jampl_list,jampb_list)]) 830 for ((jampl, ampl_coeff),(jampb, ampb_coeff)) in \ 831 itertools.product(jampl_list,jampb_list): 832 # take the numerator and multiply by lcm/denominator 833 # as we will later divide by the lcm. 834 buff_num=ampl_coeff[0].numerator*\ 835 ampb_coeff[0].numerator*ColorMatrixNum[jampl][jampb]*\ 836 abs(common_denom)/(ampl_coeff[0].denominator*\ 837 ampb_coeff[0].denominator*ColorMatrixDenom[jampl]) 838 # Remember that we must take the complex conjugate of 839 # the born jamp color coefficient because we will compute 840 # the square with 2 Re(LoopAmp x BornAmp*) 841 if ampl_coeff[1] and ampb_coeff[1]: 842 real_num=real_num+buff_num 843 elif not ampl_coeff[1] and not ampb_coeff[1]: 844 real_num=real_num+buff_num 845 elif not ampl_coeff[1] and ampb_coeff[1]: 846 imag_num=imag_num-buff_num 847 else: 848 imag_num=imag_num+buff_num 849 assert not (real_num!=0 and imag_num!=0), "MadGraph5_aMC@NLO found a "+\ 850 "color matrix element which has both a real and imaginary part." 851 if imag_num!=0: 852 assert int(imag_num) == imag_num and int(common_denom) == common_denom 853 res=fractions.Fraction(int(imag_num),int(common_denom)) 854 line_num.append(res.numerator) 855 # Negative denominator means imaginary color coef of the 856 # final color matrix 857 line_denom.append(res.denominator*-1) 858 else: 859 assert int(real_num) == real_num and int(common_denom) == common_denom 860 res=fractions.Fraction(int(real_num),int(common_denom)) 861 line_num.append(res.numerator) 862 # Positive denominator means real color coef of the final color matrix 863 line_denom.append(res.denominator) 864 865 ColorMatrixNumOutput.append(line_num) 866 ColorMatrixDenomOutput.append(line_denom) 867 868 if time_info: 869 logger.info('Finished on %s.'%datetime.datetime.now().strftime(\ 870 "%d-%m-%Y %H:%M")) 871 if progress_bar!=None: 872 progress_bar.finish() 873 874 return (ColorMatrixNumOutput,ColorMatrixDenomOutput)
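          # Recap of the output convention used above: an entry with numerator n
          # and denominator d > 0 encodes the real color factor n/d, while d < 0
          # encodes the imaginary factor i*n/|d|; e.g. (line_num, line_denom)
          # entries of (3, 2) and (3, -2) stand for 3/2 and (3/2)*i respectively.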
875
 876      def get_context(self,matrix_element):
877 """ Returns the contextual variables which need to be set when 878 pre-processing the template files.""" 879 880 # The nSquaredSO entry of the general replace dictionary should have 881 # been set in write_loopmatrix prior to the first call to this function 882 # However, for cases where the TIRCaching contextual variable is 883 # irrelevant (like in the default output), this might not be the case 884 # so we set it to 1. 885 try: 886 n_squared_split_orders = matrix_element.rep_dict['nSquaredSO'] 887 except (KeyError, AttributeError): 888 n_squared_split_orders = 1 889 890 LoopInduced = not matrix_element.get('processes')[0].get('has_born') 891 self.has_loop_induced = max(LoopInduced, self.has_loop_induced) 892 # Force the computation of loop color flows for loop_induced processes 893 ComputeColorFlows = self.compute_color_flows or LoopInduced 894 # The variable AmplitudeReduction is just to make the contextual 895 # conditions more readable in the include files. 896 AmplitudeReduction = LoopInduced or ComputeColorFlows 897 # Even when not reducing at the amplitude level, the TIR caching 898 # is useful when there is more than one squared split order config. 899 TIRCaching = AmplitudeReduction or n_squared_split_orders>1 900 MadEventOutput = False 901 return {'LoopInduced': LoopInduced, 902 'ComputeColorFlows': ComputeColorFlows, 903 'AmplitudeReduction': AmplitudeReduction, 904 'TIRCaching': TIRCaching, 905 'MadEventOutput': MadEventOutput}
906 907 908 #=========================================================================== 909 # generate_subprocess_directory 910 #===========================================================================
 911      def generate_loop_subprocess(self, matrix_element, fortran_model,
 912            group_number = None, proc_id = None, config_map=None, unique_id=None):
913 """Generate the Pxxxxx directory for a loop subprocess in MG4 standalone, 914 including the necessary loop_matrix.f, born_matrix.f and include files. 915 Notice that this is too different from generate_subprocess_directory 916 so that there is no point reusing this mother function. 917 The 'group_number' and 'proc_id' options are only used for the LoopInduced 918 MadEvent output and only to specify the ME_identifier and the P* 919 SubProcess directory name.""" 920 921 cwd = os.getcwd() 922 proc_dir_name = self.get_SubProc_folder_name( 923 matrix_element.get('processes')[0],group_number,proc_id) 924 dirpath = os.path.join(self.dir_path, 'SubProcesses', proc_dir_name) 925 926 try: 927 os.mkdir(dirpath) 928 except os.error as error: 929 logger.warning(error.strerror + " " + dirpath) 930 931 try: 932 os.chdir(dirpath) 933 except os.error: 934 logger.error('Could not cd to directory %s' % dirpath) 935 return 0 936 937 logger.info('Creating files in directory %s' % dirpath) 938 939 if unique_id is None: 940 raise MadGraph5Error('A unique id must be provided to the function'+\ 941 'generate_loop_subprocess of LoopProcessExporterFortranSA.') 942 # Create an include with the unique consecutive ID assigned 943 open('unique_id.inc','w').write( 944 """ integer UNIQUE_ID 945 parameter(UNIQUE_ID=%d)"""%unique_id) 946 947 # Extract number of external particles 948 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 949 950 calls=self.write_loop_matrix_element_v4(None,matrix_element, 951 fortran_model, group_number = group_number, 952 proc_id = proc_id, config_map = config_map) 953 954 # We assume here that all processes must share the same property of 955 # having a born or not, which must be true anyway since these are two 956 # definite different classes of processes which can never be treated on 957 # the same footing. 958 if matrix_element.get('processes')[0].get('has_born'): 959 filename = 'born_matrix.f' 960 calls = self.write_bornmatrix( 961 writers.FortranWriter(filename), 962 matrix_element, 963 fortran_model) 964 965 filename = 'pmass.inc' 966 self.write_pmass_file(writers.FortranWriter(filename), 967 matrix_element) 968 969 filename = 'ngraphs.inc' 970 self.write_ngraphs_file(writers.FortranWriter(filename), 971 len(matrix_element.get_all_amplitudes())) 972 973 # Do not draw the loop diagrams if they are too many. 
974 # The user can always decide to do it manually, if really needed 975 loop_diags = [loop_diag for loop_diag in\ 976 matrix_element.get('base_amplitude').get('loop_diagrams')\ 977 if isinstance(loop_diag,LoopDiagram) and loop_diag.get('type') > 0] 978 if len(loop_diags)>5000: 979 logger.info("There are more than 5000 loop diagrams."+\ 980 "Only the first 5000 are drawn.") 981 filename = "loop_matrix.ps" 982 plot = draw.MultiEpsDiagramDrawer(base_objects.DiagramList( 983 loop_diags[:5000]),filename, 984 model=matrix_element.get('processes')[0].get('model'),amplitude='') 985 logger.info("Drawing loop Feynman diagrams for " + \ 986 matrix_element.get('processes')[0].nice_string()) 987 plot.draw() 988 989 if matrix_element.get('processes')[0].get('has_born'): 990 filename = "born_matrix.ps" 991 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\ 992 get('born_diagrams'), 993 filename, 994 model=matrix_element.get('processes')[0].\ 995 get('model'), 996 amplitude='') 997 logger.info("Generating born Feynman diagrams for " + \ 998 matrix_element.get('processes')[0].nice_string(\ 999 print_weighted=False)) 1000 plot.draw() 1001 1002 self.link_files_from_Subprocesses(self.get_SubProc_folder_name( 1003 matrix_element.get('processes')[0],group_number,proc_id)) 1004 1005 # Return to original PWD 1006 os.chdir(cwd) 1007 1008 if not calls: 1009 calls = 0 1010 return calls
1011 1032
1033      def generate_general_replace_dict(self,matrix_element,
1034                                group_number = None, proc_id = None):
1035 """Generates the entries for the general replacement dictionary used 1036 for the different output codes for this exporter.The arguments 1037 group_number and proc_id are just for the LoopInduced output with MadEvent.""" 1038 1039 dict={} 1040 # A general process prefix which appears in front of all MadLooop 1041 # subroutines and common block so that several processes can be compiled 1042 # together into one library, as necessary to follow BLHA guidelines. 1043 1044 dict['proc_prefix'] = self.get_ME_identifier(matrix_element, 1045 group_number = group_number, group_elem_number = proc_id) 1046 1047 if 'prefix' in self.cmd_options and self.cmd_options['prefix'] in ['int','proc']: 1048 for proc in matrix_element.get('processes'): 1049 ids = [l.get('id') for l in proc.get('legs_with_decays')] 1050 self.prefix_info[tuple(ids),proc.get('id')] = [dict['proc_prefix'], proc.get_tag()] 1051 1052 # The proc_id is used for MadEvent grouping, so none of our concern here 1053 # and it is simply set to an empty string. 1054 dict['proc_id'] = '' 1055 # Extract version number and date from VERSION file 1056 info_lines = self.get_mg5_info_lines() 1057 dict['info_lines'] = info_lines 1058 # Extract process info lines 1059 process_lines = self.get_process_info_lines(matrix_element) 1060 dict['process_lines'] = process_lines 1061 # Extract number of external particles 1062 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 1063 dict['nexternal'] = nexternal 1064 dict['nincoming'] = ninitial 1065 # Extract ncomb 1066 ncomb = matrix_element.get_helicity_combinations() 1067 dict['ncomb'] = ncomb 1068 # Extract nloopamps 1069 nloopamps = matrix_element.get_number_of_loop_amplitudes() 1070 dict['nloopamps'] = nloopamps 1071 # Extract nloopdiags 1072 nloopdiags = len(matrix_element.get('diagrams')) 1073 dict['nloopdiags'] = nloopdiags 1074 # Extract nctamps 1075 nctamps = matrix_element.get_number_of_CT_amplitudes() 1076 dict['nctamps'] = nctamps 1077 # Extract nwavefuncs 1078 nwavefuncs = matrix_element.get_number_of_external_wavefunctions() 1079 dict['nwavefuncs'] = nwavefuncs 1080 # Set format of the double precision 1081 dict['real_dp_format']='real*8' 1082 dict['real_mp_format']='real*16' 1083 # Set format of the complex 1084 dict['complex_dp_format']='complex*16' 1085 dict['complex_mp_format']='complex*32' 1086 # Set format of the masses 1087 dict['mass_dp_format'] = dict['complex_dp_format'] 1088 dict['mass_mp_format'] = dict['complex_mp_format'] 1089 # Fill in default values for the placeholders for the madevent 1090 # loop-induced output 1091 dict['nmultichannels'] = 0 1092 dict['nmultichannel_configs'] = 0 1093 dict['config_map_definition'] = '' 1094 dict['config_index_map_definition'] = '' 1095 # Color matrix size 1096 # For loop induced processes it is NLOOPAMPSxNLOOPAMPS and otherwise 1097 # it is NLOOPAMPSxNBORNAMPS 1098 # Also, how to access the number of Born squared order contributions 1099 1100 if matrix_element.get('processes')[0].get('has_born'): 1101 dict['color_matrix_size'] = 'nbornamps' 1102 dict['get_nsqso_born']=\ 1103 "include 'nsqso_born.inc'" 1104 else: 1105 dict['get_nsqso_born']="""INTEGER NSQSO_BORN 1106 PARAMETER (NSQSO_BORN=0) 1107 """ 1108 dict['color_matrix_size'] = 'nloopamps' 1109 1110 # These placeholders help to have as many common templates for the 1111 # output of the loop induced processes and those with a born 1112 # contribution. 
1113 if matrix_element.get('processes')[0].get('has_born'): 1114 # Extract nbornamps 1115 nbornamps = matrix_element.get_number_of_born_amplitudes() 1116 dict['nbornamps'] = nbornamps 1117 dict['ncomb_helas_objs'] = ',ncomb' 1118 dict['nbornamps_decl'] = \ 1119 """INTEGER NBORNAMPS 1120 PARAMETER (NBORNAMPS=%d)"""%nbornamps 1121 dict['nBornAmps'] = nbornamps 1122 1123 else: 1124 dict['ncomb_helas_objs'] = '' 1125 dict['dp_born_amps_decl'] = '' 1126 dict['dp_born_amps_decl_in_mp'] = '' 1127 dict['copy_mp_to_dp_born_amps'] = '' 1128 dict['mp_born_amps_decl'] = '' 1129 dict['nbornamps_decl'] = '' 1130 dict['nbornamps'] = 0 1131 dict['nBornAmps'] = 0 1132 1133 return dict
1134
1135      def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model,
1136                     group_number = None, proc_id = None, config_map = None):
1137 """ Writes loop_matrix.f, CT_interface.f, loop_num.f and 1138 mp_born_amps_and_wfs. 1139 The arguments group_number and proc_id are just for the LoopInduced 1140 output with MadEvent and only used in get_ME_identifier. 1141 """ 1142 1143 # Create the necessary files for the loop matrix element subroutine 1144 1145 if config_map: 1146 raise MadGraph5Error('The default loop output cannot be used with'+\ 1147 'MadEvent and cannot compute the AMP2 for multi-channeling.') 1148 1149 if not isinstance(fortran_model,\ 1150 helas_call_writers.FortranUFOHelasCallWriter): 1151 raise MadGraph5Error('The loop fortran output can only'+\ 1152 ' work with a UFO Fortran model') 1153 1154 LoopFortranModel = helas_call_writers.FortranUFOHelasCallWriter( 1155 argument=fortran_model.get('model'), 1156 hel_sum=matrix_element.get('processes')[0].get('has_born')) 1157 1158 # Compute the analytical information of the loop wavefunctions in the 1159 # loop helas matrix elements using the cached aloha model to reuse 1160 # as much as possible the aloha computations already performed for 1161 # writing out the aloha fortran subroutines. 1162 matrix_element.compute_all_analytic_information( 1163 self.get_aloha_model(matrix_element.get('processes')[0].get('model'))) 1164 1165 # Initialize a general replacement dictionary with entries common to 1166 # many files generated here. 1167 matrix_element.rep_dict = self.generate_general_replace_dict( 1168 matrix_element, group_number = group_number, proc_id = proc_id) 1169 1170 # Extract max number of loop couplings (specific to this output type) 1171 matrix_element.rep_dict['maxlcouplings']= \ 1172 matrix_element.find_max_loop_coupling() 1173 # The born amp declaration suited for also outputing the loop-induced 1174 # processes as well. 
1175 if matrix_element.get('processes')[0].get('has_born'): 1176 matrix_element.rep_dict['dp_born_amps_decl_in_mp'] = \ 1177 matrix_element.rep_dict['complex_dp_format']+" DPAMP(NBORNAMPS,NCOMB)"+\ 1178 "\n common/%sAMPS/DPAMP"%matrix_element.rep_dict['proc_prefix'] 1179 matrix_element.rep_dict['dp_born_amps_decl'] = \ 1180 matrix_element.rep_dict['complex_dp_format']+" AMP(NBORNAMPS,NCOMB)"+\ 1181 "\n common/%sAMPS/AMP"%matrix_element.rep_dict['proc_prefix'] 1182 matrix_element.rep_dict['mp_born_amps_decl'] = \ 1183 matrix_element.rep_dict['complex_mp_format']+" AMP(NBORNAMPS,NCOMB)"+\ 1184 "\n common/%sMP_AMPS/AMP"%matrix_element.rep_dict['proc_prefix'] 1185 matrix_element.rep_dict['copy_mp_to_dp_born_amps'] = \ 1186 '\n'.join(['DO I=1,NBORNAMPS','DPAMP(I,H)=AMP(I,H)','ENDDO']) 1187 1188 if writer: 1189 raise MadGraph5Error('Matrix output mode no longer supported.') 1190 1191 filename = 'loop_matrix.f' 1192 calls = self.write_loopmatrix(writers.FortranWriter(filename), 1193 matrix_element, 1194 LoopFortranModel) 1195 1196 # Write out the proc_prefix in a file, this is quite handy 1197 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w') 1198 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix']) 1199 proc_prefix_writer.close() 1200 1201 filename = 'check_sa.f' 1202 self.write_check_sa(writers.FortranWriter(filename),matrix_element) 1203 1204 filename = 'CT_interface.f' 1205 self.write_CT_interface(writers.FortranWriter(filename),\ 1206 matrix_element) 1207 1208 1209 1210 filename = 'improve_ps.f' 1211 calls = self.write_improve_ps(writers.FortranWriter(filename), 1212 matrix_element) 1213 1214 filename = 'loop_num.f' 1215 self.write_loop_num(writers.FortranWriter(filename),\ 1216 matrix_element,LoopFortranModel) 1217 1218 filename = 'mp_born_amps_and_wfs.f' 1219 self.write_born_amps_and_wfs(writers.FortranWriter(filename),\ 1220 matrix_element,LoopFortranModel) 1221 1222 # Extract number of external particles 1223 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 1224 filename = 'nexternal.inc' 1225 self.write_nexternal_file(writers.FortranWriter(filename), 1226 nexternal, ninitial) 1227 1228 filename = 'process_info.inc' 1229 self.write_process_info_file(writers.FortranWriter(filename), 1230 matrix_element) 1231 return calls
1232
1233      def write_process_info_file(self, writer, matrix_element):
1234 """A small structural function to write the include file specifying some 1235 process characteristics.""" 1236 1237 model = matrix_element.get('processes')[0].get('model') 1238 process_info = {} 1239 # The maximum spin of any particle connected (or directly running in) 1240 # any loop of this matrix element. This is important because there is 1241 # some limitation in the stability tests that can be performed when this 1242 # maximum spin is above 3 (vectors). Also CutTools has limitations in 1243 # this regard. 1244 process_info['max_spin_connected_to_loop']=\ 1245 matrix_element.get_max_spin_connected_to_loop() 1246 1247 process_info['max_spin_external_particle']= max( 1248 model.get_particle(l.get('id')).get('spin') for l in 1249 matrix_element.get('processes')[0].get('legs')) 1250 1251 proc_include = \ 1252 """ 1253 INTEGER MAX_SPIN_CONNECTED_TO_LOOP 1254 PARAMETER(MAX_SPIN_CONNECTED_TO_LOOP=%(max_spin_connected_to_loop)d) 1255 INTEGER MAX_SPIN_EXTERNAL_PARTICLE 1256 PARAMETER(MAX_SPIN_EXTERNAL_PARTICLE=%(max_spin_external_particle)d) 1257 """%process_info 1258 1259 writer.writelines(proc_include)
1260
1261      def generate_subprocess_directory(self, matrix_element, fortran_model):
1262 """ To overload the default name for this function such that the correct 1263 function is used when called from the command interface """ 1264 1265 self.unique_id +=1 1266 return self.generate_loop_subprocess(matrix_element,fortran_model, 1267 unique_id=self.unique_id)
1268
1269      def write_check_sa(self, writer, matrix_element):
1270 """Writes out the steering code check_sa. In the optimized output mode, 1271 All the necessary entries in the replace_dictionary have already been 1272 set in write_loopmatrix because it is only there that one has access to 1273 the information about split orders.""" 1274 replace_dict = copy.copy(matrix_element.rep_dict) 1275 for key in ['print_so_born_results','print_so_loop_results', 1276 'write_so_born_results','write_so_loop_results','set_coupling_target']: 1277 if key not in list(replace_dict.keys()): 1278 replace_dict[key]='' 1279 1280 if matrix_element.get('processes')[0].get('has_born'): 1281 file = open(os.path.join(self.template_dir,'check_sa.inc')).read() 1282 else: 1283 file = open(os.path.join(self.template_dir,\ 1284 'check_sa_loop_induced.inc')).read() 1285 file=file%replace_dict 1286 writer.writelines(file) 1287 1288 # We can always write the f2py wrapper if present (in loop optimized mode, it is) 1289 if not os.path.isfile(pjoin(self.template_dir,'check_py.f.inc')): 1290 return 1291 1292 file = open(os.path.join(self.template_dir,\ 1293 'check_py.f.inc')).read() 1294 1295 if 'prefix' in self.cmd_options and self.cmd_options['prefix'] in ['int','proc']: 1296 replace_dict['prefix_routine'] = replace_dict['proc_prefix'] 1297 else: 1298 replace_dict['prefix_routine'] = '' 1299 file=file%replace_dict 1300 new_path = writer.name.replace('check_sa.f', 'f2py_wrapper.f') 1301 new_writer = writer.__class__(new_path, 'w') 1302 new_writer.writelines(file) 1303 1304 file = open(os.path.join(self.template_dir,\ 1305 'check_sa.py.inc')).read() 1306 # For now just put in an empty PS point but in the future, maybe generate 1307 # a valid one already here by default 1308 curr_proc = matrix_element.get('processes')[0] 1309 random_PSpoint_python_formatted = \ 1310 """# Specify your chosen PS point below. If you leave it filled with None, then the script will attempt to read it from the file PS.input. 1311 p= [[None,]*4]*%d"""%len(curr_proc.get('legs')) 1312 1313 process_definition_string = curr_proc.nice_string().replace('Process:','') 1314 file=file.format(random_PSpoint_python_formatted,process_definition_string, 1315 replace_dict['proc_prefix'].lower()) 1316 new_path = writer.name.replace('check_sa.f', 'check_sa.py') 1317 new_writer = open(new_path, 'w') 1318 new_writer.writelines(file) 1319 # Make it executable 1320 os.chmod(new_path, os.stat(new_path).st_mode | stat.S_IEXEC)
1321
1322      def write_improve_ps(self, writer, matrix_element):
1323 """ Write out the improve_ps subroutines which modify the PS point 1324 given in input and slightly deform it to achieve exact onshellness on 1325 all external particles as well as perfect energy-momentum conservation""" 1326 replace_dict = copy.copy(matrix_element.rep_dict) 1327 1328 (nexternal,ninitial)=matrix_element.get_nexternal_ninitial() 1329 replace_dict['ninitial']=ninitial 1330 mass_list=matrix_element.get_external_masses()[:-2] 1331 mp_variable_prefix = check_param_card.ParamCard.mp_prefix 1332 1333 # Write the quadruple precision version of this routine only. 1334 replace_dict['real_format']=replace_dict['real_mp_format'] 1335 replace_dict['mp_prefix']='MP_' 1336 replace_dict['exp_letter']='e' 1337 replace_dict['mp_specifier']='_16' 1338 replace_dict['coupl_inc_name']='mp_coupl.inc' 1339 replace_dict['masses_def']='\n'.join(['MASSES(%(i)d)=%(prefix)s%(m)s'\ 1340 %{'i':i+1,'m':m, 'prefix':mp_variable_prefix} for \ 1341 i, m in enumerate(mass_list)]) 1342 file_mp = open(os.path.join(self.template_dir,'improve_ps.inc')).read() 1343 file_mp=file_mp%replace_dict 1344 # 1345 writer.writelines(file_mp)
1346
1347      def write_loop_num(self, writer, matrix_element,fortran_model):
1348 """ Create the file containing the core subroutine called by CutTools 1349 which contains the Helas calls building the loop""" 1350 1351 if not matrix_element.get('processes') or \ 1352 not matrix_element.get('diagrams'): 1353 return 0 1354 1355 # Set lowercase/uppercase Fortran code 1356 writers.FortranWriter.downcase = False 1357 1358 file = open(os.path.join(self.template_dir,'loop_num.inc')).read() 1359 1360 replace_dict = copy.copy(matrix_element.rep_dict) 1361 1362 loop_helas_calls=fortran_model.get_loop_amplitude_helas_calls(matrix_element) 1363 replace_dict['maxlcouplings']=matrix_element.find_max_loop_coupling() 1364 replace_dict['loop_helas_calls'] = "\n".join(loop_helas_calls) 1365 1366 # The squaring is only necessary for the processes with born where the 1367 # sum over helicities is done before sending the numerator to CT. 1368 dp_squaring_lines=['DO I=1,NBORNAMPS', 1369 'CFTOT=DCMPLX(CF_N(AMPLNUM,I)/DBLE(ABS(CF_D(AMPLNUM,I))),0.0d0)', 1370 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1', 1371 'RES=RES+CFTOT*BUFF*DCONJG(AMP(I,H))','ENDDO'] 1372 mp_squaring_lines=['DO I=1,NBORNAMPS', 1373 'CFTOT=CMPLX(CF_N(AMPLNUM,I)/(1.0E0_16*ABS(CF_D(AMPLNUM,I))),0.0E0_16,KIND=16)', 1374 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1', 1375 'QPRES=QPRES+CFTOT*BUFF*CONJG(AMP(I,H))','ENDDO'] 1376 if matrix_element.get('processes')[0].get('has_born'): 1377 replace_dict['dp_squaring']='\n'.join(dp_squaring_lines) 1378 replace_dict['mp_squaring']='\n'.join(mp_squaring_lines) 1379 else: 1380 replace_dict['dp_squaring']='RES=BUFF' 1381 replace_dict['mp_squaring']='QPRES=BUFF' 1382 1383 # Prepend MP_ to all helas calls. 1384 self.turn_to_mp_calls(loop_helas_calls) 1385 replace_dict['mp_loop_helas_calls'] = "\n".join(loop_helas_calls) 1386 1387 file=file%replace_dict 1388 1389 if writer: 1390 writer.writelines(file) 1391 else: 1392 return file
1393
1394      def write_CT_interface(self, writer, matrix_element, optimized_output=False):
1395 """ Create the file CT_interface.f which contains the subroutine defining 1396 the loop HELAS-like calls along with the general interfacing subroutine. 1397 It is used to interface against any OPP tool, including Samurai and Ninja.""" 1398 1399 files=[] 1400 1401 # First write CT_interface which interfaces MG5 with CutTools. 1402 replace_dict=copy.copy(matrix_element.rep_dict) 1403 1404 # We finalize CT result differently wether we used the built-in 1405 # squaring against the born. 1406 if matrix_element.get('processes')[0].get('has_born'): 1407 replace_dict['finalize_CT']='\n'.join([\ 1408 'RES(%d)=NORMALIZATION*2.0d0*DBLE(RES(%d))'%(i,i) for i in range(1,4)]) 1409 else: 1410 replace_dict['finalize_CT']='\n'.join([\ 1411 'RES(%d)=NORMALIZATION*RES(%d)'%(i,i) for i in range(1,4)]) 1412 1413 file = open(os.path.join(self.template_dir,'CT_interface.inc')).read() 1414 1415 file = file % replace_dict 1416 files.append(file) 1417 1418 # Now collect the different kind of subroutines needed for the 1419 # loop HELAS-like calls. 1420 HelasLoopAmpsCallKeys=matrix_element.get_used_helas_loop_amps() 1421 1422 for callkey in HelasLoopAmpsCallKeys: 1423 replace_dict=copy.copy(matrix_element.rep_dict) 1424 # Add to this dictionary all other attribute common to all 1425 # HELAS-like loop subroutines. 1426 if matrix_element.get('processes')[0].get('has_born'): 1427 replace_dict['validh_or_nothing']=',validh' 1428 else: 1429 replace_dict['validh_or_nothing']='' 1430 # In the optimized output, the number of couplings in the loop is 1431 # not specified so we only treat it here if necessary: 1432 if len(callkey)>2: 1433 replace_dict['ncplsargs']=callkey[2] 1434 cplsargs="".join(["C%d,MP_C%d, "%(i,i) for i in range(1,callkey[2]+1)]) 1435 replace_dict['cplsargs']=cplsargs 1436 cplsdecl="".join(["C%d, "%i for i in range(1,callkey[2]+1)])[:-2] 1437 replace_dict['cplsdecl']=cplsdecl 1438 mp_cplsdecl="".join(["MP_C%d, "%i for i in range(1,callkey[2]+1)])[:-2] 1439 replace_dict['mp_cplsdecl']=mp_cplsdecl 1440 cplset="\n".join(["\n".join(["LC(%d)=C%d"%(i,i),\ 1441 "MP_LC(%d)=MP_C%d"%(i,i)])\ 1442 for i in range(1,callkey[2]+1)]) 1443 replace_dict['cplset']=cplset 1444 1445 replace_dict['nloopline']=callkey[0] 1446 wfsargs="".join(["W%d, "%i for i in range(1,callkey[1]+1)]) 1447 replace_dict['wfsargs']=wfsargs 1448 # We don't pass the multiple precision mass in the optimized_output 1449 if not optimized_output: 1450 margs="".join(["M%d,MP_M%d, "%(i,i) for i in range(1,callkey[0]+1)]) 1451 else: 1452 margs="".join(["M%d, "%i for i in range(1,callkey[0]+1)]) 1453 replace_dict['margs']=margs 1454 wfsargsdecl="".join([("W%d, "%i) for i in range(1,callkey[1]+1)])[:-2] 1455 replace_dict['wfsargsdecl']=wfsargsdecl 1456 margsdecl="".join(["M%d, "%i for i in range(1,callkey[0]+1)])[:-2] 1457 replace_dict['margsdecl']=margsdecl 1458 mp_margsdecl="".join(["MP_M%d, "%i for i in range(1,callkey[0]+1)])[:-2] 1459 replace_dict['mp_margsdecl']=mp_margsdecl 1460 weset="\n".join([("WE("+str(i)+")=W"+str(i)) for \ 1461 i in range(1,callkey[1]+1)]) 1462 replace_dict['weset']=weset 1463 weset="\n".join([("WE(%d)=W%d"%(i,i)) for i in range(1,callkey[1]+1)]) 1464 replace_dict['weset']=weset 1465 msetlines=["M2L(1)=M%d**2"%(callkey[0]),] 1466 mset="\n".join(msetlines+["M2L(%d)=M%d**2"%(i,i-1) for \ 1467 i in range(2,callkey[0]+1)]) 1468 replace_dict['mset']=mset 1469 mset2lines=["ML(1)=M%d"%(callkey[0]),"ML(2)=M%d"%(callkey[0]), 1470 "MP_ML(1)=MP_M%d"%(callkey[0]),"MP_ML(2)=MP_M%d"%(callkey[0])] 1471 
mset2="\n".join(mset2lines+["\n".join(["ML(%d)=M%d"%(i,i-2), 1472 "MP_ML(%d)=MP_M%d"%(i,i-2)]) for \ 1473 i in range(3,callkey[0]+3)]) 1474 replace_dict['mset2']=mset2 1475 replace_dict['nwfsargs'] = callkey[1] 1476 if callkey[0]==callkey[1]: 1477 replace_dict['nwfsargs_header'] = "" 1478 replace_dict['pairingargs']="" 1479 replace_dict['pairingdecl']="" 1480 pairingset="""DO I=1,NLOOPLINE 1481 PAIRING(I)=1 1482 ENDDO 1483 """ 1484 replace_dict['pairingset']=pairingset 1485 else: 1486 replace_dict['nwfsargs_header'] = '_%d'%callkey[1] 1487 pairingargs="".join([("P"+str(i)+", ") for i in \ 1488 range(1,callkey[0]+1)]) 1489 replace_dict['pairingargs']=pairingargs 1490 pairingdecl="integer "+"".join([("P"+str(i)+", ") for i in \ 1491 range(1,callkey[0]+1)])[:-2] 1492 replace_dict['pairingdecl']=pairingdecl 1493 pairingset="\n".join([("PAIRING("+str(i)+")=P"+str(i)) for \ 1494 i in range(1,callkey[0]+1)]) 1495 replace_dict['pairingset']=pairingset 1496 1497 file = open(os.path.join(self.template_dir,\ 1498 'helas_loop_amplitude.inc')).read() 1499 file = file % replace_dict 1500 files.append(file) 1501 1502 file="\n".join(files) 1503 1504 if writer: 1505 writer.writelines(file,context=self.get_context(matrix_element)) 1506 else: 1507 return file
1508 1509 # Helper function to split HELAS CALLS in dedicated subroutines placed 1510 # in different files.
1511 - def split_HELASCALLS(self, writer, replace_dict, template_name, masterfile, \ 1512 helas_calls, entry_name, bunch_name,n_helas=2000, 1513 required_so_broadcaster = 'LOOP_REQ_SO_DONE', 1514 continue_label = 1000, momenta_array_name='P', 1515 context={}):
1516 """ Finish the code generation with splitting. 1517 Split the helas calls in the argument helas_calls into bunches of 1518 size n_helas and place them in dedicated subroutine with name 1519 <bunch_name>_i. Also setup the corresponding calls to these subroutine 1520 in the replace_dict dictionary under the entry entry_name. 1521 The context specified will be forwarded to the the fileWriter.""" 1522 helascalls_replace_dict=copy.copy(replace_dict) 1523 helascalls_replace_dict['bunch_name']=bunch_name 1524 helascalls_files=[] 1525 for i, k in enumerate(range(0, len(helas_calls), n_helas)): 1526 helascalls_replace_dict['bunch_number']=i+1 1527 helascalls_replace_dict['helas_calls']=\ 1528 '\n'.join(helas_calls[k:k + n_helas]) 1529 helascalls_replace_dict['required_so_broadcaster']=\ 1530 required_so_broadcaster 1531 helascalls_replace_dict['continue_label']=continue_label 1532 new_helascalls_file = open(os.path.join(self.template_dir,\ 1533 template_name)).read() 1534 new_helascalls_file = new_helascalls_file % helascalls_replace_dict 1535 helascalls_files.append(new_helascalls_file) 1536 # Setup the call to these HELASCALLS subroutines in loop_matrix.f 1537 helascalls_calls = [ "CALL %s%s_%d(%s,NHEL,H,IC)"%\ 1538 (replace_dict['proc_prefix'] ,bunch_name,a+1,momenta_array_name) \ 1539 for a in range(len(helascalls_files))] 1540 replace_dict[entry_name]='\n'.join(helascalls_calls) 1541 if writer: 1542 for i, helascalls_file in enumerate(helascalls_files): 1543 filename = '%s_%d.f'%(bunch_name,i+1) 1544 writers.FortranWriter(filename).writelines(helascalls_file, 1545 context=context) 1546 else: 1547 masterfile='\n'.join([masterfile,]+helascalls_files) 1548 1549 return masterfile
1550
1551 - def write_loopmatrix(self, writer, matrix_element, fortran_model, 1552 noSplit=False):
1553 """Create the loop_matrix.f file.""" 1554 1555 if not matrix_element.get('processes') or \ 1556 not matrix_element.get('diagrams'): 1557 return 0 1558 1559 # Set lowercase/uppercase Fortran code 1560 1561 writers.FortranWriter.downcase = False 1562 1563 replace_dict = copy.copy(matrix_element.rep_dict) 1564 1565 # Extract overall denominator 1566 # Averaging initial state color, spin, and identical FS particles 1567 den_factor_line = self.get_den_factor_line(matrix_element) 1568 replace_dict['den_factor_line'] = den_factor_line 1569 # When the user asks for the polarized matrix element we must 1570 # multiply back by the helicity averaging factor 1571 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 1572 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\ 1573 matrix_element.get_beams_hel_avg_factor() 1574 1575 # These entries are specific for the output for loop-induced processes 1576 # Also sets here the details of the squaring of the loop ampltiudes 1577 # with the born or the loop ones. 1578 if not matrix_element.get('processes')[0].get('has_born'): 1579 replace_dict['compute_born']=\ 1580 """C There is of course no born for loop induced processes 1581 ANS(0)=0.0d0 1582 """ 1583 replace_dict['set_reference']='\n'.join([ 1584 'C For loop-induced, the reference for comparison is set later'+\ 1585 ' from the total contribution of the previous PS point considered.', 1586 'C But you can edit here the value to be used for the first PS point.', 1587 'if (NPSPOINTS.eq.0) then','ref=1.0d-50','else', 1588 'ref=nextRef/DBLE(NPSPOINTS)','endif']) 1589 replace_dict['loop_induced_setup'] = '\n'.join([ 1590 'HELPICKED_BU=HELPICKED','HELPICKED=H','MP_DONE=.FALSE.', 1591 'IF(SKIPLOOPEVAL) THEN','GOTO 1227','ENDIF']) 1592 replace_dict['loop_induced_finalize'] = \ 1593 ("""DO I=NCTAMPS+1,NLOOPAMPS 1594 IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN 1595 WRITE(*,*) '##W03 WARNING Contribution ',I 1596 WRITE(*,*) ' is unstable for helicity ',H 1597 ENDIF 1598 C IF(.NOT.%(proc_prefix)sISZERO(ABS(AMPL(2,I))+ABS(AMPL(3,I)),REF,-1,H)) THEN 1599 C WRITE(*,*) '##W04 WARNING Contribution ',I,' for helicity ',H,' has a contribution to the poles.' 1600 C WRITE(*,*) 'Finite contribution = ',AMPL(1,I) 1601 C WRITE(*,*) 'single pole contribution = ',AMPL(2,I) 1602 C WRITE(*,*) 'double pole contribution = ',AMPL(3,I) 1603 C ENDIF 1604 ENDDO 1605 1227 CONTINUE 1606 HELPICKED=HELPICKED_BU""")%replace_dict 1607 replace_dict['loop_helas_calls']="" 1608 replace_dict['nctamps_or_nloopamps']='nloopamps' 1609 replace_dict['nbornamps_or_nloopamps']='nloopamps' 1610 replace_dict['squaring']=\ 1611 """ANS(1)=ANS(1)+DBLE(CFTOT*AMPL(1,I)*DCONJG(AMPL(1,J))) 1612 IF (J.EQ.1) THEN 1613 ANS(2)=ANS(2)+DBLE(CFTOT*AMPL(2,I))+DIMAG(CFTOT*AMPL(2,I)) 1614 ANS(3)=ANS(3)+DBLE(CFTOT*AMPL(3,I))+DIMAG(CFTOT*AMPL(3,I)) 1615 ENDIF""" 1616 else: 1617 replace_dict['compute_born']=\ 1618 """C Compute the born, for a specific helicity if asked so. 
1619 call %(proc_prefix)ssmatrixhel(P_USER,USERHEL,ANS(0)) 1620 """%matrix_element.rep_dict 1621 replace_dict['set_reference']=\ 1622 """C We chose to use the born evaluation for the reference 1623 call %(proc_prefix)ssmatrix(p,ref)"""%matrix_element.rep_dict 1624 replace_dict['loop_induced_helas_calls'] = "" 1625 replace_dict['loop_induced_finalize'] = "" 1626 replace_dict['loop_induced_setup'] = "" 1627 replace_dict['nctamps_or_nloopamps']='nctamps' 1628 replace_dict['nbornamps_or_nloopamps']='nbornamps' 1629 replace_dict['squaring']='\n'.join(['DO K=1,3', 1630 'ANS(K)=ANS(K)+2.0d0*DBLE(CFTOT*AMPL(K,I)*DCONJG(AMP(J,H)))', 1631 'ENDDO']) 1632 1633 # Write a dummy nsquaredSO.inc which is used in the default 1634 # loop_matrix.f code (even though it does not support split orders evals) 1635 # just to comply with the syntax expected from the external code using MadLoop. 1636 writers.FortranWriter('nsquaredSO.inc').writelines( 1637 """INTEGER NSQUAREDSO 1638 PARAMETER (NSQUAREDSO=0)""") 1639 1640 # Actualize results from the loops computed. Only necessary for 1641 # processes with a born. 1642 actualize_ans=[] 1643 if matrix_element.get('processes')[0].get('has_born'): 1644 actualize_ans.append("DO I=NCTAMPS+1,NLOOPAMPS") 1645 actualize_ans.extend("ANS(%d)=ANS(%d)+AMPL(%d,I)"%(i,i,i) for i \ 1646 in range(1,4)) 1647 actualize_ans.append(\ 1648 "IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN") 1649 actualize_ans.append(\ 1650 "WRITE(*,*) '##W03 WARNING Contribution ',I,' is unstable.'") 1651 actualize_ans.extend(["ENDIF","ENDDO"]) 1652 replace_dict['actualize_ans']='\n'.join(actualize_ans) 1653 else: 1654 replace_dict['actualize_ans']=\ 1655 ("""C We add five powers to the reference value to loosen a bit the vanishing pole check. 1656 C IF(.NOT.(CHECKPHASE.OR.(.NOT.HELDOUBLECHECKED)).AND..NOT.%(proc_prefix)sISZERO(ABS(ANS(2))+ABS(ANS(3)),ABS(ANS(1))*(10.0d0**5),-1,H)) THEN 1657 C WRITE(*,*) '##W05 WARNING Found a PS point with a contribution to the single pole.' 
1658 C WRITE(*,*) 'Finite contribution = ',ANS(1) 1659 C WRITE(*,*) 'single pole contribution = ',ANS(2) 1660 C WRITE(*,*) 'double pole contribution = ',ANS(3) 1661 C ENDIF""")%replace_dict 1662 1663 # Write out the color matrix 1664 (CMNum,CMDenom) = self.get_color_matrix(matrix_element) 1665 CMWriter=open(pjoin('..','MadLoop5_resources', 1666 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w') 1667 for ColorLine in CMNum: 1668 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 1669 CMWriter.close() 1670 CMWriter=open(pjoin('..','MadLoop5_resources', 1671 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w') 1672 for ColorLine in CMDenom: 1673 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 1674 CMWriter.close() 1675 1676 # Write out the helicity configurations 1677 HelConfigs=matrix_element.get_helicity_matrix() 1678 HelConfigWriter=open(pjoin('..','MadLoop5_resources', 1679 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w') 1680 for HelConfig in HelConfigs: 1681 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n') 1682 HelConfigWriter.close() 1683 1684 # Extract helas calls 1685 loop_amp_helas_calls = fortran_model.get_loop_amp_helas_calls(\ 1686 matrix_element) 1687 # The proc_prefix must be replaced 1688 loop_amp_helas_calls = [lc % matrix_element.rep_dict 1689 for lc in loop_amp_helas_calls] 1690 1691 born_ct_helas_calls, UVCT_helas_calls = \ 1692 fortran_model.get_born_ct_helas_calls(matrix_element) 1693 # In the default output, we do not need to separate these two kind of 1694 # contributions 1695 born_ct_helas_calls = born_ct_helas_calls + UVCT_helas_calls 1696 file = open(os.path.join(self.template_dir,\ 1697 1698 'loop_matrix_standalone.inc')).read() 1699 1700 if matrix_element.get('processes')[0].get('has_born'): 1701 toBeRepaced='loop_helas_calls' 1702 else: 1703 toBeRepaced='loop_induced_helas_calls' 1704 1705 # Decide here wether we need to split the loop_matrix.f file or not. 1706 if (not noSplit and (len(matrix_element.get_all_amplitudes())>1000)): 1707 file=self.split_HELASCALLS(writer,replace_dict,\ 1708 'helas_calls_split.inc',file,born_ct_helas_calls,\ 1709 'born_ct_helas_calls','helas_calls_ampb') 1710 file=self.split_HELASCALLS(writer,replace_dict,\ 1711 'helas_calls_split.inc',file,loop_amp_helas_calls,\ 1712 toBeRepaced,'helas_calls_ampl') 1713 else: 1714 replace_dict['born_ct_helas_calls']='\n'.join(born_ct_helas_calls) 1715 replace_dict[toBeRepaced]='\n'.join(loop_amp_helas_calls) 1716 1717 file = file % replace_dict 1718 1719 loop_calls_finder = re.compile(r'^\s*CALL\S*LOOP\S*') 1720 n_loop_calls = len([call for call in loop_amp_helas_calls if not loop_calls_finder.match(call) is None]) 1721 if writer: 1722 # Write the file 1723 writer.writelines(file) 1724 return n_loop_calls 1725 else: 1726 # Return it to be written along with the others 1727 return n_loop_calls, file
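# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the exporter): the factor 2.0d0*DBLE(...)
# appearing in the 'squaring' and 'finalize_CT' entries above comes from
# expanding |M_born + M_loop|^2 to NLO accuracy, where only the interference
# term 2*Re(M_loop * conj(M_born)) is kept at this order.
def _demo_born_loop_interference(m_born, m_loop):
    """Return the NLO interference of two complex amplitudes."""
    return 2.0 * (m_loop * m_born.conjugate()).real
# e.g. _demo_born_loop_interference(1 + 2j, 0.5 - 1j) == 2 * (-1.5) == -3.0
# ------------------------------------------------------------------------------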
1728
1729 - def write_bornmatrix(self, writer, matrix_element, fortran_model):
1730 """Create the born_matrix.f file for the born process as for a standard 1731 tree-level computation.""" 1732 1733 if not matrix_element.get('processes') or \ 1734 not matrix_element.get('diagrams'): 1735 return 0 1736 1737 if not isinstance(writer, writers.FortranWriter): 1738 raise writers.FortranWriter.FortranWriterError(\ 1739 "writer not FortranWriter") 1740 1741 # For now, we can use the exact same treatment as for tree-level 1742 # computations by redefining here a regular HelasMatrixElementf or the 1743 # born process. 1744 # It is important to make a deepcopy, as we don't want any possible 1745 # treatment on the objects of the bornME to have border effects on 1746 # the content of the LoopHelasMatrixElement object. 1747 bornME = helas_objects.HelasMatrixElement() 1748 for prop in bornME.keys(): 1749 bornME.set(prop,copy.deepcopy(matrix_element.get(prop))) 1750 bornME.set('base_amplitude',None,force=True) 1751 bornME.set('diagrams',copy.deepcopy(\ 1752 matrix_element.get_born_diagrams())) 1753 bornME.set('color_basis',copy.deepcopy(\ 1754 matrix_element.get('born_color_basis'))) 1755 bornME.set('color_matrix',copy.deepcopy(\ 1756 color_amp.ColorMatrix(bornME.get('color_basis')))) 1757 # This is to decide wether once to reuse old wavefunction to store new 1758 # ones (provided they are not used further in the code.) 1759 bornME.optimization = True 1760 return super(LoopProcessExporterFortranSA,self).write_matrix_element_v4( 1761 writer, bornME, fortran_model, 1762 proc_prefix=matrix_element.rep_dict['proc_prefix'])
1763
1764 - def write_born_amps_and_wfs(self, writer, matrix_element, fortran_model, 1765 noSplit=False):
1766 """ Writes out the code for the subroutine MP_BORN_AMPS_AND_WFS which 1767 computes just the external wavefunction and born amplitudes in 1768 multiple precision. """ 1769 1770 if not matrix_element.get('processes') or \ 1771 not matrix_element.get('diagrams'): 1772 return 0 1773 1774 replace_dict = copy.copy(matrix_element.rep_dict) 1775 1776 # For the wavefunction copy, check what suffix is needed for the W array 1777 if matrix_element.get('processes')[0].get('has_born'): 1778 replace_dict['h_w_suffix']=',H' 1779 else: 1780 replace_dict['h_w_suffix']='' 1781 1782 # Extract helas calls 1783 born_amps_and_wfs_calls , uvct_amp_calls = \ 1784 fortran_model.get_born_ct_helas_calls(matrix_element, include_CT=True) 1785 # In the default output, these two kind of contributions do not need to 1786 # be differentiated 1787 born_amps_and_wfs_calls = born_amps_and_wfs_calls + uvct_amp_calls 1788 1789 # Turn these HELAS calls to the multiple-precision version of the HELAS 1790 # subroutines. 1791 self.turn_to_mp_calls(born_amps_and_wfs_calls) 1792 1793 file = open(os.path.join(self.template_dir,\ 1794 'mp_born_amps_and_wfs.inc')).read() 1795 # Decide here wether we need to split the loop_matrix.f file or not. 1796 if (not noSplit and (len(matrix_element.get_all_amplitudes())>2000)): 1797 file=self.split_HELASCALLS(writer,replace_dict,\ 1798 'mp_helas_calls_split.inc',file,\ 1799 born_amps_and_wfs_calls,'born_amps_and_wfs_calls',\ 1800 'mp_helas_calls') 1801 else: 1802 replace_dict['born_amps_and_wfs_calls']=\ 1803 '\n'.join(born_amps_and_wfs_calls) 1804 1805 file = file % replace_dict 1806 if writer: 1807 # Write the file 1808 writer.writelines(file) 1809 else: 1810 # Return it to be written along with the others 1811 return file 1812 1813 #=============================================================================== 1814 # LoopProcessOptimizedExporterFortranSA 1815 #=============================================================================== 1816
1817 -class LoopProcessOptimizedExporterFortranSA(LoopProcessExporterFortranSA):
1818 """Class to take care of exporting a set of loop matrix elements in the 1819 Fortran format which exploits the Pozzorini method of representing 1820 the loop numerators as polynomial to render its evaluations faster.""" 1821 1822 template_dir=os.path.join(_file_path,'iolibs/template_files/loop_optimized') 1823 # The option below controls wether one wants to group together in one single 1824 # CutTools/TIR call the loops with same denominator structure 1825 forbid_loop_grouping = False 1826 1827 # List of potential TIR library one wants to link to. 1828 # Golem and Samurai will typically get obtained from gosam_contrib 1829 # which might also contain a version of ninja. We must therefore 1830 # make sure that ninja appears first in the list of -L because 1831 # it is the tool for which the user is most susceptible of 1832 # using a standalone verison independent of gosam_contrib 1833 all_tir=['pjfry','iregi','ninja','golem','samurai','collier'] 1834
1835 - def __init__(self, dir_path = "", opt=None):
1836 """Initiate the LoopProcessOptimizedExporterFortranSA with directory 1837 information on where to find all the loop-related source files, 1838 like CutTools and TIR""" 1839 1840 super(LoopProcessOptimizedExporterFortranSA,self).__init__(dir_path, opt) 1841 1842 # TIR available ones 1843 self.tir_available_dict={'pjfry':True,'iregi':True,'golem':True, 1844 'samurai':True,'ninja':True,'collier':True} 1845 1846 for tir in self.all_tir: 1847 tir_dir="%s_dir"%tir 1848 if tir_dir in self.opt and not self.opt[tir_dir] is None: 1849 # Make sure to defer the 'local path' to the current MG5aMC root. 1850 tir_path = self.opt[tir_dir].strip() 1851 if tir_path.startswith('.'): 1852 tir_path = os.path.abspath(pjoin(MG5DIR,tir_path)) 1853 setattr(self,tir_dir,tir_path) 1854 else: 1855 setattr(self,tir_dir,'')
1856
1857 - def copy_template(self, model):
1858 """Additional actions needed to setup the Template. 1859 """ 1860 1861 super(LoopProcessOptimizedExporterFortranSA, self).copy_template(model) 1862 1863 self.loop_optimized_additional_template_setup()
1864
1865 - def get_context(self,matrix_element, **opts):
1866 """ Additional contextual information which needs to be created for 1867 the optimized output.""" 1868 1869 context = LoopProcessExporterFortranSA.get_context(self, matrix_element, 1870 **opts) 1871 1872 # For now assume Ninja always supports quadruple precision 1873 try: 1874 context['ninja_supports_quad_prec'] = \ 1875 misc.get_ninja_quad_prec_support(getattr(self,'ninja_dir')) 1876 except AttributeError: 1877 context['ninja_supports_quad_prec'] = False 1878 1879 for tir in self.all_tir: 1880 context['%s_available'%tir]=self.tir_available_dict[tir] 1881 # safety check 1882 if tir not in ['golem','pjfry','iregi','samurai','ninja','collier']: 1883 raise MadGraph5Error("%s was not a TIR currently interfaced."%tir_name) 1884 1885 return context
1886
1887 - def loop_optimized_additional_template_setup(self):
1888 """ Perform additional actions specific for this class when setting 1889 up the template with the copy_template function.""" 1890 1891 # We must link the TIR to the Library folder of the active Template 1892 link_tir_libs=[] 1893 tir_libs=[] 1894 tir_include=[] 1895 1896 for tir in self.all_tir: 1897 tir_dir="%s_dir"%tir 1898 libpath=getattr(self,tir_dir) 1899 libname="lib%s.a"%tir 1900 tir_name=tir 1901 libpath = self.link_TIR(os.path.join(self.dir_path, 'lib'), 1902 libpath,libname,tir_name=tir_name) 1903 if libpath != "": 1904 if tir in ['ninja','pjfry','golem','samurai','collier']: 1905 # It is cleaner to use the original location of the libraries 1906 link_tir_libs.append('-L%s/ -l%s'%(libpath,tir)) 1907 tir_libs.append('%s/lib%s.$(libext)'%(libpath,tir)) 1908 # For Ninja, we must also link against OneLoop. 1909 if tir in ['ninja']: 1910 if not any(os.path.isfile(pjoin(libpath,'libavh_olo.%s'%ext)) 1911 for ext in ['a','dylib','so']): 1912 raise MadGraph5Error( 1913 "The OneLOop library 'libavh_olo.(a|dylib|so)' could no be found in path '%s'. Please place a symlink to it there."%libpath) 1914 link_tir_libs.append('-L%s/ -l%s'%(libpath,'avh_olo')) 1915 tir_libs.append('%s/lib%s.$(libext)'%(libpath,'avh_olo')) 1916 if tir in ['ninja','golem', 'samurai','collier']: 1917 trgt_path = pjoin(os.path.dirname(libpath),'include') 1918 if os.path.isdir(trgt_path): 1919 to_include = misc.find_includes_path(trgt_path, 1920 self.include_names[tir]) 1921 else: 1922 to_include = None 1923 # Special possible location for collier 1924 if to_include is None and tir=='collier': 1925 to_include = misc.find_includes_path( 1926 pjoin(libpath,'modules'),self.include_names[tir]) 1927 if to_include is None: 1928 logger.error( 1929 'Could not find the include directory for %s, looking in %s.\n' % (tir, str(trgt_path))+ 1930 'Generation carries on but you will need to edit the include path by hand in the makefiles.') 1931 to_include = '<Not_found_define_it_yourself>' 1932 tir_include.append('-I %s'%str(to_include)) 1933 # To be able to easily compile a MadLoop library using 1934 # makefiles built outside of the MG5_aMC framework 1935 # (such as what is done with the Sherpa interface), we 1936 # place here an easy handle on the golem includes 1937 name_map = {'golem':'golem95','samurai':'samurai', 1938 'ninja':'ninja','collier':'collier'} 1939 ln(to_include, starting_dir=pjoin(self.dir_path,'lib'), 1940 name='%s_include'%name_map[tir],abspath=True) 1941 ln(libpath, starting_dir=pjoin(self.dir_path,'lib'), 1942 name='%s_lib'%name_map[tir],abspath=True) 1943 else : 1944 link_tir_libs.append('-l%s'%tir) 1945 tir_libs.append('$(LIBDIR)lib%s.$(libext)'%tir) 1946 1947 MadLoop_makefile_definitions = pjoin(self.dir_path,'SubProcesses', 1948 'MadLoop_makefile_definitions') 1949 if os.path.isfile(MadLoop_makefile_definitions): 1950 os.remove(MadLoop_makefile_definitions) 1951 1952 calls = self.write_loop_makefile_definitions( 1953 writers.MakefileWriter(MadLoop_makefile_definitions), 1954 link_tir_libs,tir_libs, tir_include=tir_include) 1955 1956 # Finally overwrite MadLoopCommons.f now that we know the availibility of 1957 # COLLIER. 
1958 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone', 1959 "SubProcesses","MadLoopCommons.inc")).read() 1960 writer = writers.FortranWriter(os.path.join(self.dir_path, 1961 "SubProcesses","MadLoopCommons.f")) 1962 writer.writelines(MadLoopCommon%{ 1963 'print_banner_commands':self.MadLoop_banner}, context={ 1964 'collier_available':self.tir_available_dict['collier']}) 1965 writer.close()
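# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the exporter): the shape of the linker
# entries assembled above for an externally installed TIR library. The path
# used here is a made-up example.
def _demo_tir_link_entries(tir='ninja', libpath='/opt/gosam_contrib/lib'):
    link_flag = '-L%s/ -l%s' % (libpath, tir)           # e.g. '-L/opt/... -lninja'
    lib_target = '%s/lib%s.$(libext)' % (libpath, tir)  # makefile dependency
    return link_flag, lib_target
# ------------------------------------------------------------------------------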
1966 1978 1979 2107
2108 - def set_group_loops(self, matrix_element):
2109 """ Decides whether we must group loops or not for this matrix element""" 2110 2111 # Decide if loops sharing same denominator structures have to be grouped 2112 # together or not. 2113 if self.forbid_loop_grouping: 2114 self.group_loops = False 2115 else: 2116 self.group_loops = (not self.get_context(matrix_element)['ComputeColorFlows'])\ 2117 and matrix_element.get('processes')[0].get('has_born') 2118 2119 return self.group_loops
2120
2121 - def finalize(self, matrix_element, cmdhistory, MG5options, outputflag):
2122 """create the global information for loops""" 2123 2124 super(LoopProcessOptimizedExporterFortranSA,self).finalize(matrix_element, 2125 cmdhistory, MG5options, outputflag) 2126 self.write_global_specs(matrix_element)
2127 2128 2129
2130 - def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model, 2131 group_number = None, proc_id = None, config_map = None):
2132 """ Writes loop_matrix.f, CT_interface.f,TIR_interface.f,GOLEM_inteface.f 2133 and loop_num.f only but with the optimized FortranModel. 2134 The arguments group_number and proc_id are just for the LoopInduced 2135 output with MadEvent and only used in get_ME_identifier.""" 2136 2137 # Warn the user that the 'matrix' output where all relevant code is 2138 # put together in a single file is not supported in this loop output. 2139 if writer: 2140 raise MadGraph5Error('Matrix output mode no longer supported.') 2141 2142 if not isinstance(fortran_model,\ 2143 helas_call_writers.FortranUFOHelasCallWriter): 2144 raise MadGraph5Error('The optimized loop fortran output can only'+\ 2145 ' work with a UFO Fortran model') 2146 OptimizedFortranModel=\ 2147 helas_call_writers.FortranUFOHelasCallWriterOptimized(\ 2148 fortran_model.get('model'),False) 2149 2150 2151 if not matrix_element.get('processes')[0].get('has_born') and \ 2152 not self.compute_color_flows: 2153 logger.debug("Color flows will be employed despite the option"+\ 2154 " 'loop_color_flows' being set to False because it is necessary"+\ 2155 " for optimizations.") 2156 2157 # Compute the analytical information of the loop wavefunctions in the 2158 # loop helas matrix elements using the cached aloha model to reuse 2159 # as much as possible the aloha computations already performed for 2160 # writing out the aloha fortran subroutines. 2161 matrix_element.compute_all_analytic_information( 2162 self.get_aloha_model(matrix_element.get('processes')[0].get('model'))) 2163 2164 self.set_group_loops(matrix_element) 2165 2166 # Initialize a general replacement dictionary with entries common to 2167 # many files generated here. 2168 matrix_element.rep_dict = LoopProcessExporterFortranSA.\ 2169 generate_general_replace_dict(self, matrix_element, 2170 group_number = group_number, proc_id = proc_id) 2171 2172 # and those specific to the optimized output 2173 self.set_optimized_output_specific_replace_dict_entries(matrix_element) 2174 2175 # Create the necessary files for the loop matrix element subroutine 2176 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w') 2177 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix']) 2178 proc_prefix_writer.close() 2179 2180 filename = 'loop_matrix.f' 2181 calls = self.write_loopmatrix(writers.FortranWriter(filename), 2182 matrix_element, 2183 OptimizedFortranModel) 2184 2185 filename = 'check_sa.f' 2186 self.write_check_sa(writers.FortranWriter(filename),matrix_element) 2187 2188 filename = 'polynomial.f' 2189 calls = self.write_polynomial_subroutines( 2190 writers.FortranWriter(filename), 2191 matrix_element) 2192 2193 filename = 'improve_ps.f' 2194 calls = self.write_improve_ps(writers.FortranWriter(filename), 2195 matrix_element) 2196 2197 filename = 'CT_interface.f' 2198 self.write_CT_interface(writers.FortranWriter(filename),\ 2199 matrix_element) 2200 2201 filename = 'TIR_interface.f' 2202 self.write_TIR_interface(writers.FortranWriter(filename), 2203 matrix_element) 2204 2205 if 'golem' in self.tir_available_dict and self.tir_available_dict['golem']: 2206 filename = 'GOLEM_interface.f' 2207 self.write_GOLEM_interface(writers.FortranWriter(filename), 2208 matrix_element) 2209 2210 if 'collier' in self.tir_available_dict and self.tir_available_dict['collier']: 2211 filename = 'COLLIER_interface.f' 2212 self.write_COLLIER_interface(writers.FortranWriter(filename), 2213 matrix_element) 2214 2215 filename = 'loop_num.f' 2216 self.write_loop_num(writers.FortranWriter(filename),\ 2217 
matrix_element,OptimizedFortranModel) 2218 2219 filename = 'mp_compute_loop_coefs.f' 2220 self.write_mp_compute_loop_coefs(writers.FortranWriter(filename),\ 2221 matrix_element,OptimizedFortranModel) 2222 2223 if self.get_context(matrix_element)['ComputeColorFlows']: 2224 filename = 'compute_color_flows.f' 2225 self.write_compute_color_flows(writers.FortranWriter(filename), 2226 matrix_element, config_map = config_map) 2227 2228 # Extract number of external particles 2229 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 2230 filename = 'nexternal.inc' 2231 self.write_nexternal_file(writers.FortranWriter(filename), 2232 nexternal, ninitial) 2233 2234 # Write general process information 2235 filename = 'process_info.inc' 2236 self.write_process_info_file(writers.FortranWriter(filename), 2237 matrix_element) 2238 2239 if self.get_context(matrix_element)['TIRCaching']: 2240 filename = 'tir_cache_size.inc' 2241 self.write_tir_cache_size_include(writers.FortranWriter(filename)) 2242 2243 return calls
2244
2245 - def set_optimized_output_specific_replace_dict_entries(self, matrix_element):
2246 """ Specify the entries of the replacement dictionary which are specific 2247 to the optimized output and only relevant to it (the more general entries 2248 are set in the the mother class LoopProcessExporterFortranSA.""" 2249 2250 max_loop_rank=matrix_element.get_max_loop_rank() 2251 matrix_element.rep_dict['maxrank']=max_loop_rank 2252 matrix_element.rep_dict['loop_max_coefs']=\ 2253 q_polynomial.get_number_of_coefs_for_rank(max_loop_rank) 2254 max_loop_vertex_rank=matrix_element.get_max_loop_vertex_rank() 2255 matrix_element.rep_dict['vertex_max_coefs']=\ 2256 q_polynomial.get_number_of_coefs_for_rank(max_loop_vertex_rank) 2257 2258 matrix_element.rep_dict['nloopwavefuncs']=\ 2259 matrix_element.get_number_of_loop_wavefunctions() 2260 max_spin=matrix_element.get_max_loop_particle_spin() 2261 2262 matrix_element.rep_dict['max_lwf_size']= 4 if max_spin <=3 else 16 2263 matrix_element.rep_dict['nloops']=len(\ 2264 [1 for ldiag in matrix_element.get_loop_diagrams() for \ 2265 lamp in ldiag.get_loop_amplitudes()]) 2266 2267 if self.set_group_loops(matrix_element): 2268 matrix_element.rep_dict['nloop_groups']=\ 2269 len(matrix_element.get('loop_groups')) 2270 else: 2271 matrix_element.rep_dict['nloop_groups']=\ 2272 matrix_element.rep_dict['nloops']
2273
2274 - def write_loop_num(self, writer, matrix_element,fortran_model):
2275 """ Create the file containing the core subroutine called by CutTools 2276 which contains the Helas calls building the loop""" 2277 2278 replace_dict=copy.copy(matrix_element.rep_dict) 2279 2280 file = open(os.path.join(self.template_dir,'loop_num.inc')).read() 2281 file = file % replace_dict 2282 writer.writelines(file,context=self.get_context(matrix_element))
2283
2284 - def write_CT_interface(self, writer, matrix_element):
2285 """ We can re-use the mother one for the loop optimized output.""" 2286 LoopProcessExporterFortranSA.write_CT_interface(\ 2287 self, writer, matrix_element,optimized_output=True)
2288
2289 - def write_TIR_interface(self, writer, matrix_element):
2290 """ Create the file TIR_interface.f which does NOT contain the subroutine 2291 defining the loop HELAS-like calls along with the general interfacing 2292 subroutine. """ 2293 2294 # First write TIR_interface which interfaces MG5 with TIR. 2295 replace_dict=copy.copy(matrix_element.rep_dict) 2296 2297 file = open(os.path.join(self.template_dir,'TIR_interface.inc')).read() 2298 2299 # Check which loops have an Higgs effective vertex so as to correctly 2300 # implement CutTools limitation 2301 loop_groups = matrix_element.get('loop_groups') 2302 has_HEFT_vertex = [False]*len(loop_groups) 2303 for i, (denom_structure, loop_amp_list) in enumerate(loop_groups): 2304 for lamp in loop_amp_list: 2305 final_lwf = lamp.get_final_loop_wavefunction() 2306 while not final_lwf is None: 2307 # We define here an HEFT vertex as any vertex built up from 2308 # only massless vectors and massive scalars (at least one of each) 2309 # We ask for massive scalars in part to remove the gluon ghost false positive. 2310 scalars = len([1 for wf in final_lwf.get('mothers') if 2311 wf.get('spin')==1 and wf.get('mass')!='ZERO']) 2312 vectors = len([1 for wf in final_lwf.get('mothers') if 2313 wf.get('spin')==3 and wf.get('mass')=='ZERO']) 2314 if scalars>=1 and vectors>=1 and \ 2315 scalars+vectors == len(final_lwf.get('mothers')): 2316 has_HEFT_vertex[i] = True 2317 break 2318 final_lwf = final_lwf.get_loop_mother() 2319 else: 2320 continue 2321 break 2322 2323 has_HEFT_list = [] 2324 chunk_size = 9 2325 for k in range(0, len(has_HEFT_vertex), chunk_size): 2326 has_HEFT_list.append("DATA (HAS_AN_HEFT_VERTEX(I),I=%6r,%6r) /%s/" % \ 2327 (k + 1, min(k + chunk_size, len(has_HEFT_vertex)), 2328 ','.join(['.TRUE.' if l else '.FALSE.' for l in 2329 has_HEFT_vertex[k:k + chunk_size]]))) 2330 replace_dict['has_HEFT_list'] = '\n'.join(has_HEFT_list) 2331 2332 file = file % replace_dict 2333 2334 FPR = q_polynomial.FortranPolynomialRoutines( 2335 replace_dict['maxrank'],coef_format=replace_dict['complex_dp_format'],\ 2336 sub_prefix=replace_dict['proc_prefix']) 2337 if self.tir_available_dict['pjfry']: 2338 file += '\n\n'+FPR.write_pjfry_mapping() 2339 if self.tir_available_dict['iregi']: 2340 file += '\n\n'+FPR.write_iregi_mapping() 2341 2342 if writer: 2343 writer.writelines(file,context=self.get_context(matrix_element)) 2344 else: 2345 return file
2346
2347 - def write_COLLIER_interface(self, writer, matrix_element):
2348 """ Create the file COLLIER_interface.f""" 2349 2350 # First write GOLEM_interface which interfaces MG5 with TIR. 2351 replace_dict=copy.copy(matrix_element.rep_dict) 2352 2353 file = open(os.path.join(self.template_dir,'COLLIER_interface.inc')).read() 2354 2355 FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\ 2356 coef_format=replace_dict['complex_dp_format'],\ 2357 sub_prefix=replace_dict['proc_prefix']) 2358 map_definition = [] 2359 collier_map = FPR.get_COLLIER_mapping() 2360 2361 chunk_size = 10 2362 for map_name, indices_list in \ 2363 [('COEFMAP_ZERO',[c[0] for c in collier_map]), 2364 ('COEFMAP_ONE',[c[1] for c in collier_map]), 2365 ('COEFMAP_TWO',[c[2] for c in collier_map]), 2366 ('COEFMAP_THREE',[c[3] for c in collier_map])]: 2367 for k in range(0, len(indices_list), chunk_size): 2368 map_definition.append("DATA (%s(I),I=%3r,%3r) /%s/" % \ 2369 (map_name,k, min(k + chunk_size, len(indices_list))-1, 2370 ','.join('%2r'%ind for ind in indices_list[k:k + chunk_size]))) 2371 2372 replace_dict['collier_coefmap'] = '\n'.join(map_definition) 2373 2374 file = file % replace_dict 2375 2376 if writer: 2377 writer.writelines(file,context=self.get_context(matrix_element)) 2378 else: 2379 return file
2380
2381 - def write_GOLEM_interface(self, writer, matrix_element):
2382 """ Create the file GOLEM_interface.f which does NOT contain the subroutine 2383 defining the loop HELAS-like calls along with the general interfacing 2384 subroutine. """ 2385 2386 # First write GOLEM_interface which interfaces MG5 with TIR. 2387 replace_dict=copy.copy(matrix_element.rep_dict) 2388 2389 # We finalize TIR result differently wether we used the built-in 2390 # squaring against the born. 2391 if not self.get_context(matrix_element)['AmplitudeReduction']: 2392 replace_dict['loop_induced_sqsoindex']=',SQSOINDEX' 2393 else: 2394 replace_dict['loop_induced_sqsoindex']='' 2395 2396 file = open(os.path.join(self.template_dir,'GOLEM_interface.inc')).read() 2397 2398 file = file % replace_dict 2399 2400 FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\ 2401 coef_format=replace_dict['complex_dp_format'],\ 2402 sub_prefix=replace_dict['proc_prefix']) 2403 2404 file += '\n\n'+FPR.write_golem95_mapping() 2405 2406 if writer: 2407 writer.writelines(file,context=self.get_context(matrix_element)) 2408 else: 2409 return file
2410
2411 - def write_polynomial_subroutines(self,writer,matrix_element):
2412 """ Subroutine to create all the subroutines relevant for handling 2413 the polynomials representing the loop numerator """ 2414 2415 # First create 'loop_max_coefs.inc' 2416 IncWriter=writers.FortranWriter('loop_max_coefs.inc','w') 2417 IncWriter.writelines("""INTEGER LOOPMAXCOEFS 2418 PARAMETER (LOOPMAXCOEFS=%(loop_max_coefs)d)""" 2419 %matrix_element.rep_dict) 2420 2421 # Then coef_specs directly in DHELAS if it does not exist already 2422 # 'coef_specs.inc'. If several processes exported different files there, 2423 # it is fine because the overall maximum value will overwrite it in the 2424 # end 2425 coef_specs_path = pjoin(self.dir_path, 'Source','DHELAS','coef_specs.inc') 2426 if not os.path.isfile(coef_specs_path): 2427 IncWriter=writers.FortranWriter(coef_specs_path,'w') 2428 IncWriter.writelines("""INTEGER MAXLWFSIZE 2429 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d) 2430 INTEGER VERTEXMAXCOEFS 2431 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\ 2432 %matrix_element.rep_dict) 2433 IncWriter.close() 2434 2435 # List of all subroutines to place there 2436 subroutines=[] 2437 2438 # Start from the routine in the template 2439 replace_dict = copy.copy(matrix_element.rep_dict) 2440 2441 dp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read() 2442 mp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read() 2443 # The double precision version of the basic polynomial routines, such as 2444 # create_loop_coefs 2445 replace_dict['complex_format'] = replace_dict['complex_dp_format'] 2446 replace_dict['real_format'] = replace_dict['real_dp_format'] 2447 replace_dict['mp_prefix'] = '' 2448 replace_dict['kind'] = 8 2449 replace_dict['zero_def'] = '0.0d0' 2450 replace_dict['one_def'] = '1.0d0' 2451 dp_routine = dp_routine % replace_dict 2452 # The quadruple precision version of the basic polynomial routines 2453 replace_dict['complex_format'] = replace_dict['complex_mp_format'] 2454 replace_dict['real_format'] = replace_dict['real_mp_format'] 2455 replace_dict['mp_prefix'] = 'MP_' 2456 replace_dict['kind'] = 16 2457 replace_dict['zero_def'] = '0.0e0_16' 2458 replace_dict['one_def'] = '1.0e0_16' 2459 mp_routine = mp_routine % replace_dict 2460 subroutines.append(dp_routine) 2461 subroutines.append(mp_routine) 2462 2463 # Initialize the polynomial routine writer 2464 poly_writer=q_polynomial.FortranPolynomialRoutines( 2465 matrix_element.get_max_loop_rank(), 2466 updater_max_rank = matrix_element.get_max_loop_vertex_rank(), 2467 sub_prefix=replace_dict['proc_prefix'], 2468 proc_prefix=replace_dict['proc_prefix'], 2469 mp_prefix='') 2470 # Write the polynomial constant module common to all 2471 writer.writelines(poly_writer.write_polynomial_constant_module()+'\n') 2472 2473 mp_poly_writer=q_polynomial.FortranPolynomialRoutines( 2474 matrix_element.get_max_loop_rank(), 2475 updater_max_rank = matrix_element.get_max_loop_vertex_rank(), 2476 coef_format='complex*32', sub_prefix='MP_'+replace_dict['proc_prefix'], 2477 proc_prefix=replace_dict['proc_prefix'], mp_prefix='MP_') 2478 # The eval subroutine 2479 subroutines.append(poly_writer.write_polynomial_evaluator()) 2480 subroutines.append(mp_poly_writer.write_polynomial_evaluator()) 2481 # The add coefs subroutine 2482 subroutines.append(poly_writer.write_add_coefs()) 2483 subroutines.append(mp_poly_writer.write_add_coefs()) 2484 # The merging one for creating the loop coefficients 2485 subroutines.append(poly_writer.write_wl_merger()) 2486 subroutines.append(mp_poly_writer.write_wl_merger()) 2487 for wl_update 
in matrix_element.get_used_wl_updates(): 2488 # We pick here the most appropriate way of computing the 2489 # tensor product depending on the rank of the two tensors. 2490 # The various choices below come from a careful comparison of 2491 # the different methods using the valgrind profiler. 2492 if wl_update[0]==wl_update[1]==1 or wl_update[0]==0 or wl_update[1]==0: 2493 # If either rank is 0, or if they are both equal to 1, 2494 # then we are better off using the full expanded polynomial, 2495 # and let the compiler optimize it. 2496 subroutines.append(poly_writer.write_expanded_wl_updater(\ 2497 wl_update[0],wl_update[1])) 2498 subroutines.append(mp_poly_writer.write_expanded_wl_updater(\ 2499 wl_update[0],wl_update[1])) 2500 elif wl_update[0] >= wl_update[1]: 2501 # If the loop polynomial is larger, then we will filter and loop 2502 # over the vertex coefficients first. The smallest product for 2503 # which the routines below could be used is then 2504 # loop_rank_2 x vertex_rank_1 2505 subroutines.append(poly_writer.write_compact_wl_updater(\ 2506 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True)) 2507 subroutines.append(mp_poly_writer.write_compact_wl_updater(\ 2508 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True)) 2509 else: 2510 # This happens only when the rank of the updater (vertex coef) 2511 # is larger than the one of the loop coef and none of them is 2512 # zero. This never happens in renormalizable theories but it 2513 # can happen in the HEFT ones or other effective ones. In this 2514 # case the typical use of this routine is for the product 2515 # loop_rank_1 x vertex_rank_2 2516 subroutines.append(poly_writer.write_compact_wl_updater(\ 2517 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False)) 2518 subroutines.append(mp_poly_writer.write_compact_wl_updater(\ 2519 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False)) 2520 2521 writer.writelines('\n\n'.join(subroutines), 2522 context=self.get_context(matrix_element))
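# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the exporter): the rank-based choice of
# polynomial-product routine made in the loop above, returned here as a label
# instead of Fortran code. The two ranks correspond to a (loop_rank, vertex_rank)
# pair of a wl_update entry.
def _demo_choose_wl_updater(loop_rank, vertex_rank):
    if loop_rank == vertex_rank == 1 or loop_rank == 0 or vertex_rank == 0:
        # Fully expanded product, left to the compiler to optimize.
        return 'expanded'
    elif loop_rank >= vertex_rank:
        # Compact product, looping over the vertex coefficients first.
        return 'compact_vertex_coefs_first'
    else:
        # Rare case (e.g. effective theories): vertex rank exceeds loop rank.
        return 'compact_loop_coefs_first'
# ------------------------------------------------------------------------------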
2523
2524 - def write_mp_compute_loop_coefs(self, writer, matrix_element, fortran_model):
2525 """Create the write_mp_compute_loop_coefs.f file.""" 2526 2527 if not matrix_element.get('processes') or \ 2528 not matrix_element.get('diagrams'): 2529 return 0 2530 2531 # Set lowercase/uppercase Fortran code 2532 2533 writers.FortranWriter.downcase = False 2534 2535 replace_dict = copy.copy(matrix_element.rep_dict) 2536 2537 # Extract helas calls 2538 squared_orders = matrix_element.get_squared_order_contribs() 2539 split_orders = matrix_element.get('processes')[0].get('split_orders') 2540 2541 born_ct_helas_calls , uvct_helas_calls = \ 2542 fortran_model.get_born_ct_helas_calls(matrix_element, 2543 squared_orders=squared_orders, split_orders=split_orders) 2544 self.turn_to_mp_calls(born_ct_helas_calls) 2545 self.turn_to_mp_calls(uvct_helas_calls) 2546 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\ 2547 matrix_element,group_loops=self.group_loops, 2548 squared_orders=squared_orders,split_orders=split_orders) 2549 # The proc_prefix must be replaced 2550 coef_construction = [c % matrix_element.rep_dict for c 2551 in coef_construction] 2552 self.turn_to_mp_calls(coef_construction) 2553 self.turn_to_mp_calls(coef_merging) 2554 2555 file = open(os.path.join(self.template_dir,\ 2556 'mp_compute_loop_coefs.inc')).read() 2557 2558 # Setup the contextual environment which is used in the splitting 2559 # functions below 2560 context = self.get_context(matrix_element) 2561 file=self.split_HELASCALLS(writer,replace_dict,\ 2562 'mp_helas_calls_split.inc',file,born_ct_helas_calls,\ 2563 'mp_born_ct_helas_calls','mp_helas_calls_ampb', 2564 required_so_broadcaster = 'MP_CT_REQ_SO_DONE', 2565 continue_label = 2000, 2566 momenta_array_name = 'MP_P', 2567 context=context) 2568 file=self.split_HELASCALLS(writer,replace_dict,\ 2569 'mp_helas_calls_split.inc',file,uvct_helas_calls,\ 2570 'mp_uvct_helas_calls','mp_helas_calls_uvct', 2571 required_so_broadcaster = 'MP_UVCT_REQ_SO_DONE', 2572 continue_label = 3000, 2573 momenta_array_name = 'MP_P', 2574 context=context) 2575 file=self.split_HELASCALLS(writer,replace_dict,\ 2576 'mp_helas_calls_split.inc',file,coef_construction,\ 2577 'mp_coef_construction','mp_coef_construction', 2578 required_so_broadcaster = 'MP_LOOP_REQ_SO_DONE', 2579 continue_label = 4000, 2580 momenta_array_name = 'MP_P', 2581 context=context) 2582 2583 replace_dict['mp_coef_merging']='\n'.join(coef_merging) 2584 2585 file = file % replace_dict 2586 2587 # Write the file 2588 writer.writelines(file,context=context)
2589
2590 - def write_color_matrix_data_file(self, writer, col_matrix):
2591 """Writes out the files (Loop|Born)ColorFlowMatrix.dat corresponding 2592 to the color coefficients for JAMP(L|B)*JAMP(L|B).""" 2593 2594 res = [] 2595 for line in range(len(col_matrix._col_basis1)): 2596 numerators = [] 2597 denominators = [] 2598 for row in range(len(col_matrix._col_basis2)): 2599 coeff = col_matrix.col_matrix_fixed_Nc[(line,row)] 2600 numerators.append('%6r'%coeff[0].numerator) 2601 denominators.append('%6r'%( 2602 coeff[0].denominator*(-1 if coeff[1] else 1))) 2603 res.append(' '.join(numerators)) 2604 res.append(' '.join(denominators)) 2605 2606 res.append('EOF') 2607 2608 writer.writelines('\n'.join(res))
2609
2610 - def write_color_flow_coefs_data_file(self, writer, color_amplitudes, 2611 color_basis):
2612 """ Writes the file '(Loop|Born)ColorFlowCoefs.dat using the coefficients 2613 list of the color_amplitudes in the argument of this function.""" 2614 2615 my_cs = color.ColorString() 2616 2617 res = [] 2618 2619 for jamp_number, coeff_list in enumerate(color_amplitudes): 2620 my_cs.from_immutable(sorted(color_basis.keys())[jamp_number]) 2621 # Order the ColorString so that its ordering is canonical. 2622 ordered_cs = color.ColorFactor([my_cs]).full_simplify()[0] 2623 res.append('%d # Coefficient for flow number %d with expr. %s'\ 2624 %(len(coeff_list), jamp_number+1, repr(ordered_cs))) 2625 # A line element is a tuple (numerator, denominator, amplitude_id) 2626 line_element = [] 2627 2628 for (coefficient, amp_number) in coeff_list: 2629 coef = self.cat_coeff(\ 2630 coefficient[0],coefficient[1],coefficient[2],coefficient[3]) 2631 line_element.append((coef[0].numerator, 2632 coef[0].denominator*(-1 if coef[1] else 1),amp_number)) 2633 # Sort them by growing amplitude number 2634 line_element.sort(key=lambda el:el[2]) 2635 2636 for i in range(3): 2637 res.append(' '.join('%6r'%elem[i] for elem in line_element)) 2638 2639 res.append('EOF') 2640 writer.writelines('\n'.join(res))
2641
2642 - def write_compute_color_flows(self, writer, matrix_element, config_map):
2643 """Writes the file compute_color_flows.f which uses the AMPL results 2644 from a common block to project them onto the color flow space so as 2645 to compute the JAMP quantities. For loop induced processes, this file 2646 will also contain a subroutine computing AMPL**2 for madevent 2647 multichanneling.""" 2648 2649 loop_col_amps = matrix_element.get_loop_color_amplitudes() 2650 matrix_element.rep_dict['nLoopFlows'] = len(loop_col_amps) 2651 2652 dat_writer = open(pjoin('..','MadLoop5_resources', 2653 '%(proc_prefix)sLoopColorFlowCoefs.dat' 2654 %matrix_element.rep_dict),'w') 2655 self.write_color_flow_coefs_data_file(dat_writer, 2656 loop_col_amps, matrix_element.get('loop_color_basis')) 2657 dat_writer.close() 2658 2659 dat_writer = open(pjoin('..','MadLoop5_resources', 2660 '%(proc_prefix)sLoopColorFlowMatrix.dat' 2661 %matrix_element.rep_dict),'w') 2662 self.write_color_matrix_data_file(dat_writer, 2663 matrix_element.get('color_matrix')) 2664 dat_writer.close() 2665 2666 if matrix_element.get('processes')[0].get('has_born'): 2667 born_col_amps = matrix_element.get_born_color_amplitudes() 2668 matrix_element.rep_dict['nBornFlows'] = len(born_col_amps) 2669 dat_writer = open(pjoin('..','MadLoop5_resources', 2670 '%(proc_prefix)sBornColorFlowCoefs.dat' 2671 %matrix_element.rep_dict),'w') 2672 self.write_color_flow_coefs_data_file(dat_writer, 2673 born_col_amps, matrix_element.get('born_color_basis')) 2674 dat_writer.close() 2675 2676 dat_writer = open(pjoin('..','MadLoop5_resources', 2677 '%(proc_prefix)sBornColorFlowMatrix.dat' 2678 %matrix_element.rep_dict),'w') 2679 self.write_color_matrix_data_file(dat_writer, 2680 color_amp.ColorMatrix(matrix_element.get('born_color_basis'))) 2681 dat_writer.close() 2682 else: 2683 matrix_element.rep_dict['nBornFlows'] = 0 2684 2685 replace_dict = copy.copy(matrix_element.rep_dict) 2686 2687 # The following variables only have to be defined for the LoopInduced 2688 # output for madevent. 2689 if self.get_context(matrix_element)['MadEventOutput']: 2690 self.get_amp2_lines(matrix_element, replace_dict, config_map) 2691 else: 2692 replace_dict['config_map_definition'] = '' 2693 replace_dict['config_index_map_definition'] = '' 2694 replace_dict['nmultichannels'] = 0 2695 replace_dict['nmultichannel_configs'] = 0 2696 2697 # The nmultichannels entry will be used in the matrix<i> wrappers as 2698 # well, so we add it to the general_replace_dict too. 2699 matrix_element.rep_dict['nmultichannels'] = \ 2700 replace_dict['nmultichannels'] 2701 matrix_element.rep_dict['nmultichannel_configs'] = \ 2702 replace_dict['nmultichannel_configs'] 2703 2704 2705 file = open(os.path.join(self.template_dir,\ 2706 'compute_color_flows.inc')).read()%replace_dict 2707 2708 writer.writelines(file,context=self.get_context(matrix_element))
2709
2710 - def write_global_specs(self, matrix_element_list, output_path=None):
2711 """ From the list of matrix element, or the single matrix element, derive 2712 the global quantities to write in global_coef_specs.inc""" 2713 2714 if isinstance(matrix_element_list, (group_subprocs.SubProcessGroupList, 2715 loop_helas_objects.LoopHelasProcess)): 2716 matrix_element_list = matrix_element_list.get_matrix_elements() 2717 2718 if isinstance(matrix_element_list, list): 2719 me_list = matrix_element_list 2720 else: 2721 me_list = [matrix_element_list] 2722 2723 if output_path is None: 2724 out_path = pjoin(self.dir_path,'SubProcesses','global_specs.inc') 2725 else: 2726 out_path = output_path 2727 2728 open(out_path,'w').write( 2729 """ integer MAXNEXTERNAL 2730 parameter(MAXNEXTERNAL=%d) 2731 integer OVERALLMAXRANK 2732 parameter(OVERALLMAXRANK=%d) 2733 integer NPROCS 2734 parameter(NPROCS=%d)"""%( 2735 max(me.get_nexternal_ninitial()[0] for me in me_list), 2736 max(me.get_max_loop_rank() for me in me_list), 2737 len(me_list)))
2738 2739
2740 - def fix_coef_specs(self, overall_max_lwf_spin, overall_max_loop_vert_rank):
2741 """ If processes with different maximum loop wavefunction size or 2742 different maximum loop vertex rank have to be output together, then 2743 the file 'coef.inc' in the HELAS Source folder must contain the overall 2744 maximum of these quantities. It is not safe though, and the user has 2745 been appropriatly warned at the output stage """ 2746 2747 # Remove the existing link 2748 coef_specs_path=os.path.join(self.dir_path,'Source','DHELAS',\ 2749 'coef_specs.inc') 2750 os.remove(coef_specs_path) 2751 2752 spin_to_wf_size = {1:4,2:4,3:4,4:16,5:16} 2753 overall_max_lwf_size = spin_to_wf_size[overall_max_lwf_spin] 2754 overall_max_loop_vert_coefs = q_polynomial.get_number_of_coefs_for_rank( 2755 overall_max_loop_vert_rank) 2756 # Replace it by the appropriate value 2757 IncWriter=writers.FortranWriter(coef_specs_path,'w') 2758 IncWriter.writelines("""INTEGER MAXLWFSIZE 2759 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d) 2760 INTEGER VERTEXMAXCOEFS 2761 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\ 2762 %{'max_lwf_size':overall_max_lwf_size, 2763 'vertex_max_coefs':overall_max_loop_vert_coefs}) 2764 IncWriter.close()
2765
2766 - def setup_check_sa_replacement_dictionary(self, matrix_element, \ 2767 split_orders,squared_orders,amps_orders):
2768 """ Sets up the replacement dictionary for the writeout of the steering 2769 file check_sa.f""" 2770 if len(squared_orders)<1: 2771 matrix_element.rep_dict['print_so_loop_results']=\ 2772 "write(*,*) 'No split orders defined.'" 2773 elif len(squared_orders)==1: 2774 matrix_element.rep_dict['set_coupling_target']='' 2775 matrix_element.rep_dict['print_so_loop_results']=\ 2776 "write(*,*) 'All loop contributions are of split orders (%s)'"%( 2777 ' '.join(['%s=%d'%(split_orders[i],squared_orders[0][i]) \ 2778 for i in range(len(split_orders))])) 2779 else: 2780 matrix_element.rep_dict['set_coupling_target']='\n'.join([ 2781 '# Here we leave the default target squared split order to -1, meaning that we'+ 2782 ' aim at computing all individual contributions. You can choose otherwise.', 2783 'call %(proc_prefix)sSET_COUPLINGORDERS_TARGET(-1)'%matrix_element.rep_dict]) 2784 matrix_element.rep_dict['print_so_loop_results'] = '\n'.join([ 2785 '\n'.join(["write(*,*) '%dL) Loop ME for orders (%s) :'"%((j+1),(' '.join( 2786 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]))), 2787 "IF (PREC_FOUND(%d).NE.-1.0d0) THEN"%(j+1), 2788 "write(*,*) ' > accuracy = ',PREC_FOUND(%d)"%(j+1), 2789 "ELSE", 2790 "write(*,*) ' > accuracy = NA'", 2791 "ENDIF", 2792 "write(*,*) ' > finite = ',MATELEM(1,%d)"%(j+1), 2793 "write(*,*) ' > 1eps = ',MATELEM(2,%d)"%(j+1), 2794 "write(*,*) ' > 2eps = ',MATELEM(3,%d)"%(j+1) 2795 ]) for j, so in enumerate(squared_orders)]) 2796 matrix_element.rep_dict['write_so_loop_results'] = '\n'.join( 2797 ["write (69,*) 'Split_Orders_Names %s'"%(' '.join(split_orders))]+ 2798 ['\n'.join([ 2799 "write (69,*) 'Loop_SO_Results %s'"%(' '.join( 2800 ['%d'%so_value for so_value in so])), 2801 "write (69,*) 'SO_Loop ACC ',PREC_FOUND(%d)"%(j+1), 2802 "write (69,*) 'SO_Loop FIN ',MATELEM(1,%d)"%(j+1), 2803 "write (69,*) 'SO_Loop 1EPS ',MATELEM(2,%d)"%(j+1), 2804 "write (69,*) 'SO_Loop 2EPS ',MATELEM(3,%d)"%(j+1), 2805 ]) for j, so in enumerate(squared_orders)]) 2806 2807 # We must reconstruct here the born squared orders. 
2808 squared_born_so_orders = [] 2809 for i, amp_order in enumerate(amps_orders['born_amp_orders']): 2810 for j in range(0,i+1): 2811 key = tuple([ord1 + ord2 for ord1,ord2 in \ 2812 zip(amp_order[0],amps_orders['born_amp_orders'][j][0])]) 2813 if not key in squared_born_so_orders: 2814 squared_born_so_orders.append(key) 2815 if len(squared_born_so_orders)<1: 2816 matrix_element.rep_dict['print_so_born_results'] = '' 2817 elif len(squared_born_so_orders)==1: 2818 matrix_element.rep_dict['print_so_born_results'] = \ 2819 "write(*,*) 'All Born contributions are of split orders (%s)'"%( 2820 ' '.join(['%s=%d'%(split_orders[i],squared_born_so_orders[0][i]) 2821 for i in range(len(split_orders))])) 2822 else: 2823 matrix_element.rep_dict['print_so_born_results'] = '\n'.join([ 2824 "write(*,*) '%dB) Born ME for orders (%s) = ',MATELEM(0,%d)"%(j+1,' '.join( 2825 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]),j+1) 2826 for j, so in enumerate(squared_born_so_orders)]) 2827 matrix_element.rep_dict['write_so_born_results'] = '\n'.join( 2828 ['\n'.join([ 2829 "write (69,*) 'Born_SO_Results %s'"%(' '.join( 2830 ['%d'%so_value for so_value in so])), 2831 "write (69,*) 'SO_Born BORN ',MATELEM(0,%d)"%(j+1), 2832 ]) for j, so in enumerate(squared_born_so_orders)]) 2833 2834 # Add a bottom bar to both print_so_[loop|born]_results 2835 matrix_element.rep_dict['print_so_born_results'] += \ 2836 '\nwrite (*,*) "---------------------------------"' 2837 matrix_element.rep_dict['print_so_loop_results'] += \ 2838 '\nwrite (*,*) "---------------------------------"'
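# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the exporter): the reconstruction of the Born
# squared split orders performed above, i.e. all element-wise sums of pairs of
# amplitude orders (i, j<=i), kept in order of first appearance. Here each entry
# of born_amp_orders is just the tuple of orders, without the amplitude numbers
# carried along in the real code.
def _demo_born_squared_orders(born_amp_orders):
    squared = []
    for i, order_i in enumerate(born_amp_orders):
        for order_j in born_amp_orders[:i + 1]:
            key = tuple(o1 + o2 for o1, o2 in zip(order_i, order_j))
            if key not in squared:
                squared.append(key)
    return squared
# With orders [(2, 0), (0, 2)] for (QCD, QED) one obtains [(4, 0), (2, 2), (0, 4)].
# ------------------------------------------------------------------------------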
2839
2840 - def write_tir_cache_size_include(self, writer):
2841 """Write the file 'tir_cache_size.inc' which sets the size of the TIR 2842 cache the the user wishes to employ and the default value for it. 2843 This can have an impact on MadLoop speed when using stability checks 2844 but also impacts in a non-negligible way MadLoop's memory footprint. 2845 It is therefore important that the user can chose its size.""" 2846 2847 # For the standalone optimized output, a size of one is necessary. 2848 # The MadLoop+MadEvent output sets it to 2 because it can gain further 2849 # speed increase with a TIR cache of size 2 due to the structure of the 2850 # calls to MadLoop there. 2851 tir_cach_size = "parameter(TIR_CACHE_SIZE=1)" 2852 writer.writelines(tir_cach_size)
2853
2854 - def write_loopmatrix(self, writer, matrix_element, fortran_model, \ 2855 write_auxiliary_files=True,):
2856 """Create the loop_matrix.f file.""" 2857 2858 if not matrix_element.get('processes') or \ 2859 not matrix_element.get('diagrams'): 2860 return 0 2861 2862 # Set lowercase/uppercase Fortran code 2863 writers.FortranWriter.downcase = False 2864 2865 # Starting off with the treatment of the split_orders since some 2866 # of the information extracted there will come into the 2867 # general_replace_dict. Split orders are abbreviated SO in all the 2868 # keys of the replacement dictionaries. 2869 2870 # Take care of the split_orders 2871 squared_orders, amps_orders = matrix_element.get_split_orders_mapping() 2872 # Creating here a temporary list containing only the information of 2873 # what are the different squared split orders contributing 2874 # (i.e. not using max_contrib_amp_number and max_contrib_ref_amp_number) 2875 sqso_contribs = [sqso[0] for sqso in squared_orders] 2876 split_orders = matrix_element.get('processes')[0].get('split_orders') 2877 # The entries set in the function below are only for check_sa written 2878 # out in write_loop__matrix_element_v4 (it is however placed here because the 2879 # split order information is only available here). 2880 self.setup_check_sa_replacement_dictionary(matrix_element, 2881 split_orders,sqso_contribs,amps_orders) 2882 2883 # Now recast the split order basis for the loop, born and counterterm 2884 # amplitude into one single splitorderbasis. 2885 overall_so_basis = list(set( 2886 [born_so[0] for born_so in amps_orders['born_amp_orders']]+ 2887 [born_so[0] for born_so in amps_orders['loop_amp_orders']])) 2888 # We must re-sort it to make sure it follows an increasing WEIGHT order 2889 order_hierarchy = matrix_element.get('processes')[0]\ 2890 .get('model').get('order_hierarchy') 2891 if set(order_hierarchy.keys()).union(set(split_orders))==\ 2892 set(order_hierarchy.keys()): 2893 overall_so_basis.sort(key= lambda so: 2894 sum([order_hierarchy[split_orders[i]]*order_power for \ 2895 i, order_power in enumerate(so)])) 2896 2897 # Those are additional entries used throughout the different files of 2898 # MadLoop5 2899 matrix_element.rep_dict['split_order_str_list'] = str(split_orders) 2900 matrix_element.rep_dict['nSO'] = len(split_orders) 2901 matrix_element.rep_dict['nSquaredSO'] = len(sqso_contribs) 2902 matrix_element.rep_dict['nAmpSO'] = len(overall_so_basis) 2903 2904 writers.FortranWriter('nsquaredSO.inc').writelines( 2905 """INTEGER NSQUAREDSO 2906 PARAMETER (NSQUAREDSO=%d)"""%matrix_element.rep_dict['nSquaredSO']) 2907 2908 replace_dict = copy.copy(matrix_element.rep_dict) 2909 # Build the general array mapping the split orders indices to their 2910 # definition 2911 replace_dict['ampsplitorders'] = '\n'.join(self.get_split_orders_lines(\ 2912 overall_so_basis,'AMPSPLITORDERS')) 2913 replace_dict['SquaredSO'] = '\n'.join(self.get_split_orders_lines(\ 2914 sqso_contribs,'SQPLITORDERS')) 2915 2916 # Specify what are the squared split orders selected by the proc def. 2917 replace_dict['chosen_so_configs'] = self.set_chosen_SO_index( 2918 matrix_element.get('processes')[0],sqso_contribs) 2919 2920 # Now we build the different arrays storing the split_orders ID of each 2921 # amp. 
2922 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['loop_amp_orders']) 2923 for SO in amps_orders['loop_amp_orders']: 2924 for amp_number in SO[1]: 2925 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1 2926 2927 replace_dict['loopAmpSO'] = '\n'.join(self.format_integer_list( 2928 ampSO_list,'LOOPAMPORDERS')) 2929 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['born_amp_orders']) 2930 for SO in amps_orders['born_amp_orders']: 2931 for amp_number in SO[1]: 2932 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1 2933 replace_dict['BornAmpSO'] = '\n'.join(self.format_integer_list( 2934 ampSO_list,'BORNAMPORDERS')) 2935 2936 # We then go to the TIR setup 2937 # The first entry is the CutTools, we make sure it is available 2938 looplibs_av=['.TRUE.'] 2939 # one should be careful about the order in the following as it must match 2940 # the ordering in MadLoopParamsCard. 2941 for tir_lib in ['pjfry','iregi','golem','samurai','ninja','collier']: 2942 looplibs_av.append('.TRUE.' if tir_lib in self.all_tir and \ 2943 self.tir_available_dict[tir_lib] else '.FALSE.') 2944 replace_dict['data_looplibs_av']=','.join(looplibs_av) 2945 2946 # Helicity offset convention 2947 # For a given helicity, the attached integer 'i' means 2948 # 'i' in ]-inf;-HELOFFSET[ -> Helicity is equal, up to a sign, 2949 # to helicity number abs(i+HELOFFSET) 2950 # 'i' == -HELOFFSET -> Helicity is analytically zero 2951 # 'i' in ]-HELOFFSET,inf[ -> Helicity is contributing with weight 'i'. 2952 # If it is zero, it is skipped. 2953 # Typically, the hel_offset is 10000 2954 replace_dict['hel_offset'] = 10000 2955 2956 # Extract overall denominator 2957 # Averaging initial state color, spin, and identical FS particles 2958 den_factor_line = self.get_den_factor_line(matrix_element) 2959 replace_dict['den_factor_line'] = den_factor_line 2960 2961 # When the user asks for the polarized matrix element we must 2962 # multiply back by the helicity averaging factor 2963 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 2964 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\ 2965 matrix_element.get_beams_hel_avg_factor() 2966 2967 if write_auxiliary_files: 2968 # Write out the color matrix 2969 (CMNum,CMDenom) = self.get_color_matrix(matrix_element) 2970 CMWriter=open(pjoin('..','MadLoop5_resources', 2971 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w') 2972 for ColorLine in CMNum: 2973 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 2974 CMWriter.close() 2975 CMWriter=open(pjoin('..','MadLoop5_resources', 2976 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w') 2977 for ColorLine in CMDenom: 2978 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 2979 CMWriter.close() 2980 2981 # Write out the helicity configurations 2982 HelConfigs=matrix_element.get_helicity_matrix() 2983 HelConfigWriter=open(pjoin('..','MadLoop5_resources', 2984 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w') 2985 for HelConfig in HelConfigs: 2986 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n') 2987 HelConfigWriter.close() 2988 2989 # Extract helas calls 2990 born_ct_helas_calls, uvct_helas_calls = \ 2991 fortran_model.get_born_ct_helas_calls(matrix_element, 2992 squared_orders=squared_orders,split_orders=split_orders) 2993 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\ 2994 matrix_element,group_loops=self.group_loops, 2995 squared_orders=squared_orders,split_orders=split_orders) 
2996 2997 loop_CT_calls = fortran_model.get_loop_CT_calls(matrix_element,\ 2998 group_loops=self.group_loops, 2999 squared_orders=squared_orders, split_orders=split_orders) 3000 # The proc_prefix must be replaced 3001 coef_construction = [c % matrix_element.rep_dict for c 3002 in coef_construction] 3003 loop_CT_calls = [lc % matrix_element.rep_dict for lc in loop_CT_calls] 3004 3005 file = open(os.path.join(self.template_dir,\ 3006 'loop_matrix_standalone.inc')).read() 3007 3008 # Setup the contextual environment which is used in the splitting 3009 # functions below 3010 context = self.get_context(matrix_element) 3011 file=self.split_HELASCALLS(writer,replace_dict,\ 3012 'helas_calls_split.inc',file,born_ct_helas_calls,\ 3013 'born_ct_helas_calls','helas_calls_ampb', 3014 required_so_broadcaster = 'CT_REQ_SO_DONE', 3015 continue_label = 2000, context = context) 3016 file=self.split_HELASCALLS(writer,replace_dict,\ 3017 'helas_calls_split.inc',file,uvct_helas_calls,\ 3018 'uvct_helas_calls','helas_calls_uvct', 3019 required_so_broadcaster = 'UVCT_REQ_SO_DONE', 3020 continue_label = 3000, context=context) 3021 file=self.split_HELASCALLS(writer,replace_dict,\ 3022 'helas_calls_split.inc',file,coef_construction,\ 3023 'coef_construction','coef_construction', 3024 required_so_broadcaster = 'LOOP_REQ_SO_DONE', 3025 continue_label = 4000, context=context) 3026 file=self.split_HELASCALLS(writer,replace_dict,\ 3027 'helas_calls_split.inc',file,loop_CT_calls,\ 3028 'loop_CT_calls','loop_CT_calls', 3029 required_so_broadcaster = 'CTCALL_REQ_SO_DONE', 3030 continue_label = 5000, context=context) 3031 3032 # Add the entries above to the general_replace_dict so that it can be 3033 # used by write_mp_compute_loop_coefs later 3034 matrix_element.rep_dict['loop_CT_calls']=replace_dict['loop_CT_calls'] 3035 matrix_element.rep_dict['born_ct_helas_calls']=replace_dict['born_ct_helas_calls'] 3036 matrix_element.rep_dict['uvct_helas_calls']=replace_dict['uvct_helas_calls'] 3037 matrix_element.rep_dict['coef_construction']=replace_dict['coef_construction'] 3038 3039 replace_dict['coef_merging']='\n'.join(coef_merging) 3040 file = file % replace_dict 3041 number_of_calls = len([call for call in loop_CT_calls if call.find('CALL LOOP') != 0]) 3042 if writer: 3043 # Write the file 3044 writer.writelines(file,context=context) 3045 return number_of_calls 3046 else: 3047 # Return it to be written along with the others 3048 return number_of_calls, file
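The split-order bookkeeping above can be illustrated with a small, self-contained example; the basis and amplitude numbers below are invented for the illustration and do not come from a real process.

    # Toy version of the amplitude -> split-order index mapping built above.
    overall_so_basis = [(2, 0), (0, 2)]                  # e.g. orders in (QCD, QED)
    loop_amp_orders = [((2, 0), [1, 3]), ((0, 2), [2])]  # (orders, amplitude numbers)

    ampSO_list = [-1] * sum(len(amps) for _, amps in loop_amp_orders)
    for orders, amp_numbers in loop_amp_orders:
        for amp_number in amp_numbers:
            # Fortran-style 1-based index into the overall split-order basis
            ampSO_list[amp_number - 1] = overall_so_basis.index(orders) + 1

    print(ampSO_list)  # -> [1, 2, 1]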
3049 3050 #=============================================================================== 3051 # LoopProcessExporterFortranSA 3052 #===============================================================================
3053 -class LoopProcessExporterFortranMatchBox(LoopProcessOptimizedExporterFortranSA, 3054 export_v4.ProcessExporterFortranMatchBox):
3055 """Class to take care of exporting a set of loop matrix elements in the 3056 Fortran format.""" 3057 3058 default_opt = {'clean': False, 'complex_mass':False, 3059 'export_format':'madloop_matchbox', 'mp':True, 3060 'loop_dir':'', 'cuttools_dir':'', 3061 'fortran_compiler':'gfortran', 3062 'output_dependencies':'external', 3063 'sa_symmetry':True} 3064 3065 3066
3067 - def get_color_string_lines(self, matrix_element):
3068 """Return the color matrix definition lines for this matrix element. Split 3069 rows in chunks of size n.""" 3070 3071 return export_v4.ProcessExporterFortranMatchBox.get_color_string_lines(matrix_element)
3072 3073
3074 - def get_JAMP_lines(self, *args, **opts):
3075 """Adding leading color part of the colorflow""" 3076 3077 return export_v4.ProcessExporterFortranMatchBox.get_JAMP_lines(self, *args, **opts)
3078
3079 - def get_ME_identifier(self, matrix_element, group_number = None, group_elem_number = None):
3080 """ To not mix notations between borns and virtuals we call it here also MG5 """ 3081 return 'MG5_%d_'%matrix_element.get('processes')[0].get('id')
3082 3083 3084 #=============================================================================== 3085 # LoopInducedExporter 3086 #===============================================================================
3087 -class LoopInducedExporterME(LoopProcessOptimizedExporterFortranSA):
3088 """ A class to specify all the functions common to LoopInducedExporterMEGroup 3089 and LoopInducedExporterMENoGroup (but not relevant for the original 3090 Madevent exporters)""" 3091 3092 madloop_makefile_name = 'makefile_MadLoop' 3093 3094
3095 - def __init__(self, *args, **opts):
3096 """ Initialize the process, setting the proc characteristics.""" 3097 super(LoopInducedExporterME, self).__init__(*args, **opts) 3098 self.proc_characteristic['loop_induced'] = True
3099
3100 - def get_context(self,*args,**opts):
3101 """ Make sure that the contextual variable MadEventOutput is set to 3102 True for this exporter""" 3103 3104 context = super(LoopInducedExporterME,self).get_context(*args,**opts) 3105 context['MadEventOutput'] = True 3106 return context
3107 3108 #=========================================================================== 3109 # write a procdef_mg5 (an equivalent of the MG4 proc_card.dat) 3110 #===========================================================================
3111 - def write_procdef_mg5(self, file_pos, modelname, process_str):
3112 """ write an equivalent of the MG4 proc_card in order that all the Madevent 3113 Perl script of MadEvent4 are still working properly for pure MG5 run. 3114 Not needed for StandAlone so we need to call the correct one 3115 """ 3116 3117 return export_v4.ProcessExporterFortran.write_procdef_mg5( 3118 self, file_pos, modelname, process_str)
3119
3120 - def get_source_libraries_list(self):
3121 """ Returns the list of libraries to be compiling when compiling the 3122 SOURCE directory. It is different for loop_induced processes and 3123 also depends on the value of the 'output_dependencies' option""" 3124 3125 libraries_list = super(LoopInducedExporterME,self).\ 3126 get_source_libraries_list() 3127 3128 if self.dependencies=='internal': 3129 libraries_list.append('$(LIBDIR)libcts.$(libext)') 3130 libraries_list.append('$(LIBDIR)libiregi.$(libext)') 3131 3132 return libraries_list
3133 3140
3141 - def copy_template(self, *args, **opts):
3142 """Pick the right mother functions 3143 """ 3144 # Call specifically the necessary building functions for the mixed 3145 # template setup for both MadEvent and MadLoop standalone 3146 LoopProcessExporterFortranSA.loop_additional_template_setup(self, 3147 copy_Source_makefile=False) 3148 3149 LoopProcessOptimizedExporterFortranSA.\ 3150 loop_optimized_additional_template_setup(self)
3151 3152 3153 #=========================================================================== 3154 # Create jpeg diagrams, html pages,proc_card_mg5.dat and madevent.tar.gz 3155 #===========================================================================
3156 - def finalize(self, matrix_elements, history, mg5options, flaglist):
3157 """Function to finalize v4 directory, for inheritance. 3158 """ 3159 3160 self.proc_characteristic['loop_induced'] = True 3161 3162 # This can be uncommented if one desires to have the MadLoop 3163 # initialization performed at the end of the output phase. 3164 # Alternatively, one can simply execute the command 'initMadLoop' in 3165 # the madevent interactive interface after the output. 3166 # from madgraph.interface.madevent_interface import MadLoopInitializer 3167 # MadLoopInitializer.init_MadLoop(self.dir_path, 3168 # subproc_prefix=self.SubProc_prefix, MG_options=None) 3169 3170 self.write_global_specs(matrix_elements)
3171
3172 - def write_tir_cache_size_include(self, writer):
3173 """Write the file 'tir_cache_size.inc' which sets the size of the TIR 3174 cache the the user wishes to employ and the default value for it. 3175 This can have an impact on MadLoop speed when using stability checks 3176 but also impacts in a non-negligible way MadLoop's memory footprint. 3177 It is therefore important that the user can chose its size.""" 3178 3179 # In this case of MadLoop+MadEvent output, we set it to 2 because we 3180 # gain further speed increase with a TIR cache of size 2 due to the 3181 # the fact that we call MadLoop once per helicity configuration in this 3182 # case. 3183 tir_cach_size = "parameter(TIR_CACHE_SIZE=2)" 3184 writer.writelines(tir_cach_size)
3185
3186 - def write_matrix_element_v4(self, writer, matrix_element, fortran_model, 3187 proc_id = None, config_map = [], subproc_number = None):
3188 """ Write it the wrapper to call the ML5 subroutine in the library.""" 3189 3190 # Generating the MadEvent wrapping ME's routines 3191 if not matrix_element.get('processes') or \ 3192 not matrix_element.get('diagrams'): 3193 return 0 3194 3195 if not isinstance(writer, writers.FortranWriter): 3196 raise writers.FortranWriter.FortranWriterError(\ 3197 "writer not FortranWriter") 3198 3199 replace_dict = copy.copy(matrix_element.rep_dict) 3200 3201 # Extract version number and date from VERSION file 3202 info_lines = self.get_mg5_info_lines() 3203 replace_dict['info_lines'] = info_lines 3204 3205 # Extract process info lines 3206 process_lines = self.get_process_info_lines(matrix_element) 3207 replace_dict['process_lines'] = process_lines 3208 3209 # Set proc_id 3210 # It can be set to None when write_matrix_element_v4 is called without 3211 # grouping. In this case the subroutine SMATRIX should take an empty 3212 # suffix. 3213 if proc_id is None: 3214 replace_dict['proc_id'] = '' 3215 else: 3216 replace_dict['proc_id'] = proc_id 3217 3218 #set the average over the number of initial helicities 3219 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 3220 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\ 3221 matrix_element.get_beams_hel_avg_factor() 3222 3223 # Extract helicity lines 3224 helicity_lines = self.get_helicity_lines(matrix_element) 3225 replace_dict['helicity_lines'] = helicity_lines 3226 3227 3228 # Extract ndiags 3229 ndiags = len(matrix_element.get('diagrams')) 3230 replace_dict['ndiags'] = ndiags 3231 3232 # Set define_iconfigs_lines 3233 replace_dict['define_iconfigs_lines'] = \ 3234 """INTEGER MAPCONFIG(0:LMAXCONFIGS), ICONFIG 3235 COMMON/TO_MCONFIGS/MAPCONFIG, ICONFIG""" 3236 3237 if proc_id: 3238 # Set lines for subprocess group version 3239 # Set define_iconfigs_lines 3240 replace_dict['define_iconfigs_lines'] += \ 3241 """\nINTEGER SUBDIAG(MAXSPROC),IB(2) 3242 COMMON/TO_SUB_DIAG/SUBDIAG,IB""" 3243 # Set set_amp2_line 3244 replace_dict['configID_in_matrix'] = "SUBDIAG(%s)"%proc_id 3245 else: 3246 # Standard running 3247 # Set set_amp2_line 3248 replace_dict['configID_in_matrix'] = "MAPCONFIG(ICONFIG)" 3249 3250 # If group_numer 3251 replace_dict['ml_prefix'] = \ 3252 self.get_ME_identifier(matrix_element, subproc_number, proc_id) 3253 3254 # Extract ncolor 3255 ncolor = max(1, len(matrix_element.get('color_basis'))) 3256 replace_dict['ncolor'] = ncolor 3257 3258 n_tot_diags = len(matrix_element.get_loop_diagrams()) 3259 replace_dict['n_tot_diags'] = n_tot_diags 3260 3261 file = open(pjoin(_file_path, \ 3262 'iolibs/template_files/%s' % self.matrix_file)).read() 3263 file = file % replace_dict 3264 3265 # Write the file 3266 writer.writelines(file) 3267 3268 return 0, ncolor
3269
3270 - def get_amp2_lines(self, *args, **opts):
3271 """Make sure the function is implemented in the daughters""" 3272 3273 raise NotImplemented('The function get_amp2_lines must be called in '+\ 3274 ' the daugthers of LoopInducedExporterME')
3275 3276 #=============================================================================== 3277 # LoopInducedExporterMEGroup 3278 #===============================================================================
3279 -class LoopInducedExporterMEGroup(LoopInducedExporterME, 3280 export_v4.ProcessExporterFortranMEGroup):
3281 """Class to take care of exporting a set of grouped loop induced matrix 3282 elements""" 3283 3284 matrix_file = "matrix_loop_induced_madevent_group.inc" 3285 3291
3292 - def write_source_makefile(self, *args, **opts):
3293 """Pick the correct write_source_makefile function from 3294 ProcessExporterFortranMEGroup""" 3295 3296 export_v4.ProcessExporterFortranMEGroup.write_source_makefile(self, 3297 *args, **opts)
3298
3299 - def copy_template(self, *args, **opts):
3300 """Pick the right mother functions 3301 """ 3302 # Call specifically the necessary building functions for the mixed 3303 # template setup for both MadEvent and MadLoop standalone 3304 3305 # Start witht the MadEvent one 3306 export_v4.ProcessExporterFortranMEGroup.copy_template(self,*args,**opts) 3307 3308 # Then the MadLoop-standalone related one 3309 LoopInducedExporterME.copy_template(self, *args, **opts)
3310
3311 - def finalize(self, *args, **opts):
3312 """Pick the right mother functions 3313 """ 3314 # Call specifically what finalize_v4_directory must be used, so that the 3315 # MRO doesn't interfere. 3316 3317 self.proc_characteristic['loop_induced'] = True 3318 3319 export_v4.ProcessExporterFortranMEGroup.finalize(self,*args,**opts) 3320 3321 # And the finilize from LoopInducedExporterME which essentially takes 3322 # care of MadLoop virtuals initialization 3323 LoopInducedExporterME.finalize(self,*args,**opts)
3324
3325 - def generate_subprocess_directory(self, subproc_group, 3326 fortran_model,group_number):
3327 """Generate the Pn directory for a subprocess group in MadEvent, 3328 including the necessary matrix_N.f files, configs.inc and various 3329 other helper files""" 3330 3331 # Generate the MadLoop files 3332 calls = 0 3333 matrix_elements = subproc_group.get('matrix_elements') 3334 for ime, matrix_element in enumerate(matrix_elements): 3335 self.unique_id +=1 3336 calls += self.generate_loop_subprocess(matrix_element,fortran_model, 3337 group_number = group_number, proc_id = str(ime+1), 3338 # group_number = str(subproc_group.get('number')), proc_id = str(ime+1), 3339 config_map = subproc_group.get('diagram_maps')[ime], 3340 unique_id=self.unique_id) 3341 3342 # Then generate the MadEvent files 3343 export_v4.ProcessExporterFortranMEGroup.generate_subprocess_directory( 3344 self, subproc_group,fortran_model,group_number) 3345 3346 return calls
3347
3348 - def get_amp2_lines(self, matrix_element, replace_dict, config_map):
3349 """Return the various replacement dictionary inputs necessary for the 3350 multichanneling amp2 definition for the loop-induced MadEvent output. 3351 """ 3352 3353 if not config_map: 3354 raise MadGraph5Error('A multi-channeling configuration map is '+\ 3355 ' necessary for the MadEvent Loop-induced output with grouping.') 3356 3357 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 3358 3359 ret_lines = [] 3360 # In this case, we need to sum up all amplitudes that have 3361 # identical topologies, as given by the config_map (which 3362 # gives the topology/config for each of the diagrams 3363 if isinstance(matrix_element, loop_helas_objects.LoopHelasMatrixElement): 3364 diagrams = matrix_element.get_loop_diagrams() 3365 else: 3366 diagrams = matrix_element.get('diagrams') 3367 3368 # Note that we need to use AMP2 number corresponding to the first 3369 # diagram number used for that AMP2. 3370 # The dictionary below maps the config ID to this corresponding first 3371 # diagram number 3372 config_index_map = {} 3373 # For each diagram number, the dictionary below gives the config_id it 3374 # belongs to or 0 if it doesn't belong to any. 3375 loop_amp_ID_to_config = {} 3376 3377 # Combine the diagrams with identical topologies 3378 config_to_diag_dict = {} 3379 for idiag, diag in enumerate(diagrams): 3380 try: 3381 config_to_diag_dict[config_map[idiag]].append(idiag) 3382 except KeyError: 3383 config_to_diag_dict[config_map[idiag]] = [idiag] 3384 3385 for config in sorted(config_to_diag_dict.keys()): 3386 config_index_map[config] = (config_to_diag_dict[config][0] + 1) 3387 3388 # First add the UV and R2 counterterm amplitudes of each selected 3389 # diagram for the multichannel config 3390 CT_amp_numbers = [a.get('number') for a in \ 3391 sum([diagrams[idiag].get_ct_amplitudes() for \ 3392 idiag in config_to_diag_dict[config]], [])] 3393 3394 for CT_amp_number in CT_amp_numbers: 3395 loop_amp_ID_to_config[CT_amp_number] = config 3396 3397 # Now add here the loop amplitudes. 3398 loop_amp_numbers = [a.get('amplitudes')[0].get('number') 3399 for a in sum([diagrams[idiag].get_loop_amplitudes() for \ 3400 idiag in config_to_diag_dict[config]], [])] 3401 3402 for loop_amp_number in loop_amp_numbers: 3403 loop_amp_ID_to_config[loop_amp_number] = config 3404 3405 # Notice that the config_id's are not necessarily sequential here, so 3406 # the size of the config_index_map array has to be the maximum over all 3407 # config_ids. 3408 # config_index_map should never be empty unless there was no diagram, 3409 # so the expression below is ok. 3410 n_configs = max(config_index_map.keys()) 3411 replace_dict['nmultichannel_configs'] = n_configs 3412 3413 # We must fill the empty entries of the map with the dummy amplitude 3414 # number 0. 3415 conf_list = [(config_index_map[i] if i in config_index_map else 0) \ 3416 for i in range(1,n_configs+1)] 3417 # Now the placeholder 'nmultichannels' refers to the number of 3418 # multi-channels which are contributing, so we must filter out zeros. 
3419 replace_dict['nmultichannels'] = len([_ for _ in conf_list if _!=0]) 3420 3421 # Now write the amp2 related inputs in the replacement dictionary 3422 res_list = [] 3423 chunk_size = 6 3424 for k in range(0, len(conf_list), chunk_size): 3425 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \ 3426 (k + 1, min(k + chunk_size, len(conf_list)), 3427 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]]))) 3428 3429 replace_dict['config_index_map_definition'] = '\n'.join(res_list) 3430 3431 res_list = [] 3432 n_loop_amps = max(loop_amp_ID_to_config.keys()) 3433 amp_list = [loop_amp_ID_to_config[i] for i in \ 3434 sorted(loop_amp_ID_to_config.keys()) if i!=0] 3435 chunk_size = 6 3436 for k in range(0, len(amp_list), chunk_size): 3437 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \ 3438 (k + 1, min(k + chunk_size, len(amp_list)), 3439 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]]))) 3440 3441 replace_dict['config_map_definition'] = '\n'.join(res_list) 3442 3443 return
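The DATA statements above are emitted in chunks of six entries per line. Here is a self-contained toy version of that chunking; the array name matches the one used above but the values are invented.

    conf_list = [1, 2, 0, 3, 0, 4, 5, 6]
    chunk_size = 6
    res_list = []
    for k in range(0, len(conf_list), chunk_size):
        res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % (
            k + 1, min(k + chunk_size, len(conf_list)),
            ','.join("%6r" % i for i in conf_list[k:k + chunk_size])))
    print('\n'.join(res_list))  # two DATA statements: entries 1-6 and 7-8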
3444 3445 #=============================================================================== 3446 # LoopInducedExporterMENoGroup 3447 #===============================================================================
3448 -class LoopInducedExporterMENoGroup(LoopInducedExporterME, 3449 export_v4.ProcessExporterFortranME):
3450 """Class to take care of exporting a set of individual loop induced matrix 3451 elements""" 3452 3453 matrix_file = "matrix_loop_induced_madevent.inc" 3454 3460
3461 - def write_source_makefile(self, *args, **opts):
3462 """Pick the correct write_source_makefile function from 3463 ProcessExporterFortran""" 3464 3465 super(export_v4.ProcessExporterFortranME,self).\ 3466 write_source_makefile(*args, **opts)
3467
3468 - def copy_template(self, *args, **opts):
3469 """Pick the right mother functions 3470 """ 3471 # Call specifically the necessary building functions for the mixed 3472 # template setup for both MadEvent and MadLoop standalone 3473 3474 # Start witht the MadEvent one 3475 export_v4.ProcessExporterFortranME.copy_template(self,*args,**opts) 3476 3477 # Then the MadLoop-standalone related one 3478 LoopInducedExporterME.copy_template(self, *args, **opts)
3479
3480 - def finalize(self, *args, **opts):
3481 """Pick the right mother functions 3482 """ 3483 3484 self.proc_characteristic['loop_induced'] = True 3485 # Call specifically what finalize must be used, so that the 3486 # MRO doesn't interfere. 3487 export_v4.ProcessExporterFortranME.finalize(self, *args, **opts) 3488 3489 # And the finilize_v4 from LoopInducedExporterME which essentially takes 3490 # care of MadLoop virtuals initialization 3491 LoopInducedExporterME.finalize(self, *args, **opts)
3492
3493 - def generate_subprocess_directory(self, matrix_element, fortran_model, me_number):
3494 """Generate the Pn directory for a subprocess group in MadEvent, 3495 including the necessary matrix_N.f files, configs.inc and various 3496 other helper files""" 3497 3498 self.unique_id += 1 3499 # Then generate the MadLoop files 3500 calls = self.generate_loop_subprocess(matrix_element,fortran_model, 3501 group_number = me_number, 3502 unique_id=self.unique_id) 3503 3504 3505 # First generate the MadEvent files 3506 calls += export_v4.ProcessExporterFortranME.generate_subprocess_directory( 3507 self, matrix_element, fortran_model, me_number) 3508 return calls
3509
3510 - def get_amp2_lines(self, matrix_element, replace_dict, config_map):
3511 """Return the amp2(i) = sum(amp for diag(i))^2 lines""" 3512 3513 if config_map: 3514 raise MadGraph5Error('A configuration map should not be specified'+\ 3515 ' for the Loop induced exporter without grouping.') 3516 3517 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 3518 # Get minimum legs in a vertex 3519 vert_list = [max(diag.get_vertex_leg_numbers()) for diag in \ 3520 matrix_element.get('diagrams') if diag.get_vertex_leg_numbers()!=[]] 3521 minvert = min(vert_list) if vert_list!=[] else 0 3522 3523 # Note that we need to use AMP2 number corresponding to the first 3524 # diagram number used for that AMP2. 3525 # The dictionary below maps the config ID to this corresponding first 3526 # diagram number 3527 config_index_map = {} 3528 # For each diagram number, the dictionary below gives the config_id it 3529 # belongs to or 0 if it doesn't belong to any. 3530 loop_amp_ID_to_config = {} 3531 3532 n_configs = 0 3533 for idiag, diag in enumerate(matrix_element.get('diagrams')): 3534 # Ignore any diagrams with 4-particle vertices. 3535 use_for_multichanneling = True 3536 if diag.get_vertex_leg_numbers()!=[] and max(diag.get_vertex_leg_numbers()) > minvert: 3537 use_for_multichanneling = False 3538 curr_config = 0 3539 else: 3540 n_configs += 1 3541 curr_config = n_configs 3542 3543 if not use_for_multichanneling: 3544 if 0 not in config_index_map: 3545 config_index_map[0] = idiag + 1 3546 else: 3547 config_index_map[curr_config] = idiag + 1 3548 3549 CT_amps = [ a.get('number') for a in diag.get_ct_amplitudes()] 3550 for CT_amp in CT_amps: 3551 loop_amp_ID_to_config[CT_amp] = curr_config 3552 3553 Loop_amps = [a.get('amplitudes')[0].get('number') 3554 for a in diag.get_loop_amplitudes()] 3555 for Loop_amp in Loop_amps: 3556 loop_amp_ID_to_config[Loop_amp] = curr_config 3557 3558 # Now write the amp2 related inputs in the replacement dictionary 3559 n_configs = len([k for k in config_index_map.keys() if k!=0]) 3560 replace_dict['nmultichannel_configs'] = n_configs 3561 # Now the placeholder 'nmultichannels' refers to the number of 3562 # multi-channels which are contributing which, in the non-grouped case 3563 # is always equal to the total number of multi-channels. 3564 replace_dict['nmultichannels'] = n_configs 3565 3566 res_list = [] 3567 conf_list = [config_index_map[i] for i in sorted(config_index_map.keys()) 3568 if i!=0] 3569 chunk_size = 6 3570 for k in range(0, len(conf_list), chunk_size): 3571 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \ 3572 (k + 1, min(k + chunk_size, len(conf_list)), 3573 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]]))) 3574 3575 replace_dict['config_index_map_definition'] = '\n'.join(res_list) 3576 3577 res_list = [] 3578 n_loop_amps = max(loop_amp_ID_to_config.keys()) 3579 amp_list = [loop_amp_ID_to_config[i] for i in \ 3580 sorted(loop_amp_ID_to_config.keys()) if i!=0] 3581 chunk_size = 6 3582 for k in range(0, len(amp_list), chunk_size): 3583 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \ 3584 (k + 1, min(k + chunk_size, len(amp_list)), 3585 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]]))) 3586 3587 replace_dict['config_map_definition'] = '\n'.join(res_list)
3588
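The multichannel filter used above can be summarised with a toy example: diagrams whose largest vertex has more legs than the smallest maximum found over the process are excluded from multichanneling. The vertex-leg lists below are invented.

    diagram_vertex_legs = [[3, 3], [3, 4], [3, 3, 3]]
    vert_list = [max(legs) for legs in diagram_vertex_legs if legs != []]
    minvert = min(vert_list) if vert_list != [] else 0

    n_configs = 0
    config_of_diagram = []
    for legs in diagram_vertex_legs:
        if legs != [] and max(legs) > minvert:
            config_of_diagram.append(0)   # not used for multichanneling
        else:
            n_configs += 1
            config_of_diagram.append(n_configs)

    print(config_of_diagram)  # -> [1, 0, 2]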