Package madgraph :: Package iolibs :: Module export_v4
[hide private]
[frames] | no frames]

Source Code for Module madgraph.iolibs.export_v4

   1  ################################################################################ 
   2  # 
   3  # Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors 
   4  # 
   5  # This file is a part of the MadGraph5_aMC@NLO project, an application which  
   6  # automatically generates Feynman diagrams and matrix elements for arbitrary 
   7  # high-energy processes in the Standard Model and beyond. 
   8  # 
   9  # It is subject to the MadGraph5_aMC@NLO license which should accompany this  
  10  # distribution. 
  11  # 
  12  # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch 
  13  # 
  14  ################################################################################ 
  15  from __future__ import absolute_import, division 
  16  from madgraph.iolibs.helas_call_writers import HelasCallWriter 
  17  from six.moves import range 
  18  from six.moves import zip 
  19  from fractions import Fraction 
  20  """Methods and classes to export matrix elements to v4 format.""" 
  21   
  22  import copy 
  23  from six import StringIO 
  24  import itertools 
  25  import fractions 
  26  import glob 
  27  import logging 
  28  import math 
  29  import os 
  30  import io 
  31  import re 
  32  import shutil 
  33  import subprocess 
  34  import sys 
  35  import time 
  36  import traceback 
  37  import  collections 
  38   
  39  import aloha 
  40   
  41  import madgraph.core.base_objects as base_objects 
  42  import madgraph.core.color_algebra as color 
  43  import madgraph.core.helas_objects as helas_objects 
  44  import madgraph.iolibs.drawing_eps as draw 
  45  import madgraph.iolibs.files as files 
  46  import madgraph.iolibs.group_subprocs as group_subprocs 
  47  import madgraph.iolibs.file_writers as writers 
  48  import madgraph.iolibs.gen_infohtml as gen_infohtml 
  49  import madgraph.iolibs.template_files as template_files 
  50  import madgraph.iolibs.ufo_expression_parsers as parsers 
  51  import madgraph.iolibs.helas_call_writers as helas_call_writers 
  52  import madgraph.interface.common_run_interface as common_run_interface 
  53  import madgraph.various.diagram_symmetry as diagram_symmetry 
  54  import madgraph.various.misc as misc 
  55  import madgraph.various.banner as banner_mod 
  56  import madgraph.various.process_checks as process_checks 
  57  import madgraph.loop.loop_diagram_generation as loop_diagram_generation 
  58  import aloha.create_aloha as create_aloha 
  59  import models.import_ufo as import_ufo 
  60  import models.write_param_card as param_writer 
  61  import models.check_param_card as check_param_card 
  62  from models import UFOError 
  63   
  64   
  65  from madgraph import MadGraph5Error, MG5DIR, ReadWrite 
  66  from madgraph.iolibs.files import cp, ln, mv 
  67   
  68  from madgraph import InvalidCmd 
  69   
  70  pjoin = os.path.join 
  71   
  72  _file_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] + '/' 
  73  logger = logging.getLogger('madgraph.export_v4') 
  74   
  75  default_compiler= {'fortran': 'gfortran', 
  76                         'f2py': 'f2py', 
  77                         'cpp':'g++'} 
78 79 80 -class VirtualExporter(object):
81 82 #exporter variable who modified the way madgraph interacts with this class 83 84 grouped_mode = 'madevent' 85 # This variable changes the type of object called within 'generate_subprocess_directory' 86 #functions. 87 # False to avoid grouping (only identical matrix element are merged) 88 # 'madevent' group the massless quark and massless lepton 89 # 'madweight' group the gluon with the massless quark 90 sa_symmetry = False 91 # If no grouped_mode=False, uu~ and u~u will be called independently. 92 #Putting sa_symmetry generates only one of the two matrix-element. 93 check = True 94 # Ask madgraph to check if the directory already exists and propose to the user to 95 #remove it first if this is the case 96 output = 'Template' 97 # [Template, None, dir] 98 # - Template, madgraph will call copy_template 99 # - dir, madgraph will just create an empty directory for initialisation 100 # - None, madgraph do nothing for initialisation 101 exporter = 'v4' 102 # language of the output 'v4' for Fortran output 103 # 'cpp' for C++ output 104 105
106 - def __init__(self, dir_path = "", opt=None):
107 # cmd_options is a dictionary with all the optional argurment passed at output time 108 109 # Activate some monkey patching for the helas call writer. 110 helas_call_writers.HelasCallWriter.customize_argument_for_all_other_helas_object = \ 111 self.helas_call_writer_custom
112 113 114 # helper function for customise helas writter 115 @staticmethod
116 - def custom_helas_call(call, arg):
117 """static method to customise the way aloha function call are written 118 call is the default template for the call 119 arg are the dictionary used for the call 120 """ 121 return call, arg
122 123 helas_call_writer_custom = lambda x,y,z: x.custom_helas_call(y,z) 124 125
126 - def copy_template(self, model):
127 return
128
129 - def generate_subprocess_directory(self, subproc_group, helicity_model, me=None):
130 # generate_subprocess_directory(self, matrix_element, helicity_model, me_number) [for ungrouped] 131 return 0 # return an integer stating the number of call to helicity routine
132
133 - def convert_model(self, model, wanted_lorentz=[], wanted_couplings=[]):
134 return
135
136 - def finalize(self,matrix_element, cmdhistory, MG5options, outputflag):
137 return
138 139
140 - def pass_information_from_cmd(self, cmd):
141 """pass information from the command interface to the exporter. 142 Please do not modify any object of the interface from the exporter. 143 """ 144 return
145
146 - def modify_grouping(self, matrix_element):
147 return False, matrix_element
148
149 - def export_model_files(self, model_v4_path):
150 raise Exception("V4 model not supported by this type of exporter. Please use UFO model") 151 return
152
153 - def export_helas(self, HELAS_PATH):
154 raise Exception("V4 model not supported by this type of exporter. Please use UFO model") 155 return
156
157 #=============================================================================== 158 # ProcessExporterFortran 159 #=============================================================================== 160 -class ProcessExporterFortran(VirtualExporter):
161 """Class to take care of exporting a set of matrix elements to 162 Fortran (v4) format.""" 163 164 default_opt = {'clean': False, 'complex_mass':False, 165 'export_format':'madevent', 'mp': False, 166 'v5_model': True, 167 'output_options':{} 168 } 169 grouped_mode = False 170 jamp_optim = False 171
172 - def __init__(self, dir_path = "", opt=None):
173 """Initiate the ProcessExporterFortran with directory information""" 174 self.mgme_dir = MG5DIR 175 self.dir_path = dir_path 176 self.model = None 177 178 self.opt = dict(self.default_opt) 179 if opt: 180 self.opt.update(opt) 181 self.cmd_options = self.opt['output_options'] 182 183 #place holder to pass information to the run_interface 184 self.proc_characteristic = banner_mod.ProcCharacteristic() 185 # call mother class 186 super(ProcessExporterFortran,self).__init__(dir_path, opt)
187 188 189 #=========================================================================== 190 # process exporter fortran switch between group and not grouped 191 #===========================================================================
192 - def export_processes(self, matrix_elements, fortran_model):
193 """Make the switch between grouped and not grouped output""" 194 195 calls = 0 196 if isinstance(matrix_elements, group_subprocs.SubProcessGroupList): 197 for (group_number, me_group) in enumerate(matrix_elements): 198 calls = calls + self.generate_subprocess_directory(\ 199 me_group, fortran_model, group_number) 200 else: 201 for me_number, me in enumerate(matrix_elements.get_matrix_elements()): 202 calls = calls + self.generate_subprocess_directory(\ 203 me, fortran_model, me_number) 204 205 return calls
206 207 208 #=========================================================================== 209 # create the run_card 210 #===========================================================================
211 - def create_run_card(self, matrix_elements, history):
212 """ """ 213 214 215 # bypass this for the loop-check 216 import madgraph.loop.loop_helas_objects as loop_helas_objects 217 if isinstance(matrix_elements, loop_helas_objects.LoopHelasMatrixElement): 218 matrix_elements = None 219 220 run_card = banner_mod.RunCard() 221 222 223 default=True 224 if isinstance(matrix_elements, group_subprocs.SubProcessGroupList): 225 processes = [me.get('processes') for megroup in matrix_elements 226 for me in megroup['matrix_elements']] 227 elif matrix_elements: 228 processes = [me.get('processes') 229 for me in matrix_elements['matrix_elements']] 230 else: 231 default =False 232 233 if default: 234 run_card.create_default_for_process(self.proc_characteristic, 235 history, 236 processes) 237 238 run_card.write(pjoin(self.dir_path, 'Cards', 'run_card_default.dat')) 239 shutil.copyfile(pjoin(self.dir_path, 'Cards', 'run_card_default.dat'), 240 pjoin(self.dir_path, 'Cards', 'run_card.dat'))
241 242 243 244 #=========================================================================== 245 # copy the Template in a new directory. 246 #===========================================================================
247 - def copy_template(self, model):
248 """create the directory run_name as a copy of the MadEvent 249 Template, and clean the directory 250 """ 251 252 #First copy the full template tree if dir_path doesn't exit 253 if not os.path.isdir(self.dir_path): 254 assert self.mgme_dir, \ 255 "No valid MG_ME path given for MG4 run directory creation." 256 logger.info('initialize a new directory: %s' % \ 257 os.path.basename(self.dir_path)) 258 shutil.copytree(pjoin(self.mgme_dir, 'Template/LO'), 259 self.dir_path, True) 260 # misc.copytree since dir_path already exists 261 misc.copytree(pjoin(self.mgme_dir, 'Template/Common'), 262 self.dir_path) 263 # copy plot_card 264 for card in ['plot_card']: 265 if os.path.isfile(pjoin(self.dir_path, 'Cards',card + '.dat')): 266 try: 267 shutil.copy(pjoin(self.dir_path, 'Cards',card + '.dat'), 268 pjoin(self.dir_path, 'Cards', card + '_default.dat')) 269 except IOError: 270 logger.warning("Failed to copy " + card + ".dat to default") 271 elif os.getcwd() == os.path.realpath(self.dir_path): 272 logger.info('working in local directory: %s' % \ 273 os.path.realpath(self.dir_path)) 274 # misc.copytree since dir_path already exists 275 misc.copytree(pjoin(self.mgme_dir, 'Template/LO'), 276 self.dir_path) 277 # for name in misc.glob('Template/LO/*', self.mgme_dir): 278 # name = os.path.basename(name) 279 # filname = pjoin(self.mgme_dir, 'Template','LO',name) 280 # if os.path.isfile(filename): 281 # files.cp(filename, pjoin(self.dir_path,name)) 282 # elif os.path.isdir(filename): 283 # shutil.copytree(filename, pjoin(self.dir_path,name), True) 284 # misc.copytree since dir_path already exists 285 misc.copytree(pjoin(self.mgme_dir, 'Template/Common'), 286 self.dir_path) 287 # Copy plot_card 288 for card in ['plot_card']: 289 if os.path.isfile(pjoin(self.dir_path, 'Cards',card + '.dat')): 290 try: 291 shutil.copy(pjoin(self.dir_path, 'Cards', card + '.dat'), 292 pjoin(self.dir_path, 'Cards', card + '_default.dat')) 293 except IOError: 294 logger.warning("Failed to copy " + card + ".dat to default") 295 elif not os.path.isfile(pjoin(self.dir_path, 'TemplateVersion.txt')): 296 assert self.mgme_dir, \ 297 "No valid MG_ME path given for MG4 run directory creation." 298 try: 299 shutil.copy(pjoin(self.mgme_dir, 'MGMEVersion.txt'), self.dir_path) 300 except IOError: 301 MG5_version = misc.get_pkg_info() 302 open(pjoin(self.dir_path, 'MGMEVersion.txt'), 'w').write(MG5_version['version']) 303 304 #Ensure that the Template is clean 305 if self.opt['clean']: 306 logger.info('remove old information in %s' % \ 307 os.path.basename(self.dir_path)) 308 if 'MADGRAPH_BASE' in os.environ: 309 misc.call([pjoin('bin', 'internal', 'clean_template'), 310 '--web'], cwd=self.dir_path) 311 else: 312 try: 313 misc.call([pjoin('bin', 'internal', 'clean_template')], \ 314 cwd=self.dir_path) 315 except Exception as why: 316 raise MadGraph5Error('Failed to clean correctly %s: \n %s' \ 317 % (os.path.basename(self.dir_path),why)) 318 319 #Write version info 320 MG_version = misc.get_pkg_info() 321 open(pjoin(self.dir_path, 'SubProcesses', 'MGVersion.txt'), 'w').write( 322 MG_version['version']) 323 324 # add the makefile in Source directory 325 filename = pjoin(self.dir_path,'Source','makefile') 326 self.write_source_makefile(writers.FileWriter(filename)) 327 328 # add the DiscreteSampler information 329 files.cp(pjoin(MG5DIR,'vendor', 'DiscreteSampler', 'DiscreteSampler.f'), 330 pjoin(self.dir_path, 'Source')) 331 files.cp(pjoin(MG5DIR,'vendor', 'DiscreteSampler', 'StringCast.f'), 332 pjoin(self.dir_path, 'Source')) 333 334 # We need to create the correct open_data for the pdf 335 self.write_pdf_opendata()
336 337 338 #=========================================================================== 339 # Call MadAnalysis5 to generate the default cards for this process 340 #===========================================================================
341 - def create_default_madanalysis5_cards(self, history, proc_defs, processes, 342 ma5_path, output_dir, levels = ['parton','hadron']):
343 """ Call MA5 so that it writes default cards for both parton and 344 post-shower levels, tailored for this particular process.""" 345 346 if len(levels)==0: 347 return 348 start = time.time() 349 logger.info('Generating MadAnalysis5 default cards tailored to this process') 350 try: 351 MA5_interpreter = common_run_interface.CommonRunCmd.\ 352 get_MadAnalysis5_interpreter(MG5DIR,ma5_path,loglevel=100) 353 except (Exception, SystemExit) as e: 354 logger.warning('Fail to create a MadAnalysis5 instance. Therefore the default analysis with MadAnalysis5 will be empty') 355 return 356 if MA5_interpreter is None: 357 return 358 359 MA5_main = MA5_interpreter.main 360 for lvl in ['parton','hadron']: 361 if lvl in levels: 362 card_to_generate = pjoin(output_dir,'madanalysis5_%s_card_default.dat'%lvl) 363 try: 364 text = MA5_main.madgraph.generate_card(history, proc_defs, processes,lvl) 365 except (Exception, SystemExit) as e: 366 # keep the default card (skip only) 367 logger.warning('MadAnalysis5 failed to write a %s-level'%lvl+ 368 ' default analysis card for this process.') 369 logger.warning('Therefore, %s-level default analysis with MadAnalysis5 will be empty.'%lvl) 370 error=StringIO() 371 traceback.print_exc(file=error) 372 logger.debug('MadAnalysis5 error was:') 373 logger.debug('-'*60) 374 logger.debug(error.getvalue()[:-1]) 375 logger.debug('-'*60) 376 else: 377 open(card_to_generate,'w').write(text) 378 stop = time.time() 379 if stop-start >1: 380 logger.info('Cards created in %.2fs' % (stop-start))
381 382 #=========================================================================== 383 # write a procdef_mg5 (an equivalent of the MG4 proc_card.dat) 384 #===========================================================================
385 - def write_procdef_mg5(self, file_pos, modelname, process_str):
386 """ write an equivalent of the MG4 proc_card in order that all the Madevent 387 Perl script of MadEvent4 are still working properly for pure MG5 run.""" 388 389 proc_card_template = template_files.mg4_proc_card.mg4_template 390 process_template = template_files.mg4_proc_card.process_template 391 process_text = '' 392 coupling = '' 393 new_process_content = [] 394 395 396 # First find the coupling and suppress the coupling from process_str 397 #But first ensure that coupling are define whithout spaces: 398 process_str = process_str.replace(' =', '=') 399 process_str = process_str.replace('= ', '=') 400 process_str = process_str.replace(',',' , ') 401 #now loop on the element and treat all the coupling 402 for info in process_str.split(): 403 if '=' in info: 404 coupling += info + '\n' 405 else: 406 new_process_content.append(info) 407 # Recombine the process_str (which is the input process_str without coupling 408 #info) 409 process_str = ' '.join(new_process_content) 410 411 #format the SubProcess 412 replace_dict = {'process': process_str, 413 'coupling': coupling} 414 process_text += process_template.substitute(replace_dict) 415 416 replace_dict = {'process': process_text, 417 'model': modelname, 418 'multiparticle':''} 419 text = proc_card_template.substitute(replace_dict) 420 421 if file_pos: 422 ff = open(file_pos, 'w') 423 ff.write(text) 424 ff.close() 425 else: 426 return replace_dict
427 428
429 - def pass_information_from_cmd(self, cmd):
430 """Pass information for MA5""" 431 432 self.proc_defs = cmd._curr_proc_defs
433 434 #=========================================================================== 435 # Create jpeg diagrams, html pages,proc_card_mg5.dat and madevent.tar.gz 436 #===========================================================================
437 - def finalize(self, matrix_elements, history='', mg5options={}, flaglist=[]):
438 """Function to finalize v4 directory, for inheritance.""" 439 440 self.create_run_card(matrix_elements, history) 441 self.create_MA5_cards(matrix_elements, history)
442
443 - def create_MA5_cards(self,matrix_elements,history):
444 """ A wrapper around the creation of the MA5 cards so that it can be 445 bypassed by daughter classes (i.e. in standalone).""" 446 if 'madanalysis5_path' in self.opt and not \ 447 self.opt['madanalysis5_path'] is None and not self.proc_defs is None: 448 processes = None 449 if isinstance(matrix_elements, group_subprocs.SubProcessGroupList): 450 processes = [me.get('processes') for megroup in matrix_elements 451 for me in megroup['matrix_elements']] 452 elif matrix_elements: 453 processes = [me.get('processes') 454 for me in matrix_elements['matrix_elements']] 455 456 self.create_default_madanalysis5_cards( 457 history, self.proc_defs, processes, 458 self.opt['madanalysis5_path'], pjoin(self.dir_path,'Cards'), 459 levels = ['hadron','parton']) 460 461 for level in ['hadron','parton']: 462 # Copying these cards turn on the use of MadAnalysis5 by default. 463 if os.path.isfile(pjoin(self.dir_path,'Cards','madanalysis5_%s_card_default.dat'%level)): 464 shutil.copy(pjoin(self.dir_path,'Cards','madanalysis5_%s_card_default.dat'%level), 465 pjoin(self.dir_path,'Cards','madanalysis5_%s_card.dat'%level))
466 467 #=========================================================================== 468 # Create the proc_characteristic file passing information to the run_interface 469 #===========================================================================
470 - def create_proc_charac(self, matrix_elements=None, history="", **opts):
471 472 self.proc_characteristic.write(pjoin(self.dir_path, 'SubProcesses', 'proc_characteristics'))
473 474 #=========================================================================== 475 # write_matrix_element_v4 476 #===========================================================================
477 - def write_matrix_element_v4(self):
478 """Function to write a matrix.f file, for inheritance. 479 """ 480 pass
481 482 #=========================================================================== 483 # write_pdf_opendata 484 #===========================================================================
485 - def write_pdf_opendata(self):
486 """ modify the pdf opendata file, to allow direct access to cluster node 487 repository if configure""" 488 489 if not self.opt["cluster_local_path"]: 490 changer = {"pdf_systemwide": ""} 491 else: 492 to_add = """ 493 tempname='%(path)s'//Tablefile 494 open(IU,file=tempname,status='old',ERR=1) 495 return 496 1 tempname='%(path)s/Pdfdata/'//Tablefile 497 open(IU,file=tempname,status='old',ERR=2) 498 return 499 2 tempname='%(path)s/lhapdf'//Tablefile 500 open(IU,file=tempname,status='old',ERR=3) 501 return 502 3 tempname='%(path)s/../lhapdf/pdfsets/'//Tablefile 503 open(IU,file=tempname,status='old',ERR=4) 504 return 505 4 tempname='%(path)s/../lhapdf/pdfsets/6.1/'//Tablefile 506 open(IU,file=tempname,status='old',ERR=5) 507 return 508 """ % {"path" : self.opt["cluster_local_path"]} 509 510 changer = {"pdf_systemwide": to_add} 511 512 513 ff = writers.FortranWriter(pjoin(self.dir_path, "Source", "PDF", "opendata.f")) 514 template = open(pjoin(MG5DIR, "madgraph", "iolibs", "template_files", "pdf_opendata.f"),"r").read() 515 ff.writelines(template % changer) 516 517 # Do the same for lhapdf set 518 if not self.opt["cluster_local_path"]: 519 changer = {"cluster_specific_path": ""} 520 else: 521 to_add=""" 522 LHAPath='%(path)s/PDFsets' 523 Inquire(File=LHAPath, exist=exists) 524 if(exists)return 525 LHAPath='%(path)s/../lhapdf/pdfsets/6.1/' 526 Inquire(File=LHAPath, exist=exists) 527 if(exists)return 528 LHAPath='%(path)s/../lhapdf/pdfsets/' 529 Inquire(File=LHAPath, exist=exists) 530 if(exists)return 531 LHAPath='./PDFsets' 532 """ % {"path" : self.opt["cluster_local_path"]} 533 changer = {"cluster_specific_path": to_add} 534 535 ff = writers.FortranWriter(pjoin(self.dir_path, "Source", "PDF", "pdfwrap_lhapdf.f")) 536 #ff = open(pjoin(self.dir_path, "Source", "PDF", "pdfwrap_lhapdf.f"),"w") 537 template = open(pjoin(MG5DIR, "madgraph", "iolibs", "template_files", "pdf_wrap_lhapdf.f"),"r").read() 538 ff.writelines(template % changer) 539 540 541 return
542 543 544 545 #=========================================================================== 546 # write_maxparticles_file 547 #===========================================================================
548 - def write_maxparticles_file(self, writer, matrix_elements):
549 """Write the maxparticles.inc file for MadEvent""" 550 551 if isinstance(matrix_elements, helas_objects.HelasMultiProcess): 552 maxparticles = max([me.get_nexternal_ninitial()[0] for me in \ 553 matrix_elements.get('matrix_elements')]) 554 else: 555 maxparticles = max([me.get_nexternal_ninitial()[0] \ 556 for me in matrix_elements]) 557 558 lines = "integer max_particles\n" 559 lines += "parameter(max_particles=%d)" % maxparticles 560 561 # Write the file 562 writer.writelines(lines) 563 564 return True
565 566 567 #=========================================================================== 568 # export the model 569 #===========================================================================
570 - def export_model_files(self, model_path):
571 """Configure the files/link of the process according to the model""" 572 573 # Import the model 574 for file in os.listdir(model_path): 575 if os.path.isfile(pjoin(model_path, file)): 576 shutil.copy2(pjoin(model_path, file), \ 577 pjoin(self.dir_path, 'Source', 'MODEL'))
578 579 593 601 602 603 #=========================================================================== 604 # export the helas routine 605 #===========================================================================
606 - def export_helas(self, helas_path):
607 """Configure the files/link of the process according to the model""" 608 609 # Import helas routine 610 for filename in os.listdir(helas_path): 611 filepos = pjoin(helas_path, filename) 612 if os.path.isfile(filepos): 613 if filepos.endswith('Makefile.template'): 614 cp(filepos, self.dir_path + '/Source/DHELAS/Makefile') 615 elif filepos.endswith('Makefile'): 616 pass 617 else: 618 cp(filepos, self.dir_path + '/Source/DHELAS')
619 # following lines do the same but whithout symbolic link 620 # 621 #def export_helas(mgme_dir, dir_path): 622 # 623 # # Copy the HELAS directory 624 # helas_dir = pjoin(mgme_dir, 'HELAS') 625 # for filename in os.listdir(helas_dir): 626 # if os.path.isfile(pjoin(helas_dir, filename)): 627 # shutil.copy2(pjoin(helas_dir, filename), 628 # pjoin(dir_path, 'Source', 'DHELAS')) 629 # shutil.move(pjoin(dir_path, 'Source', 'DHELAS', 'Makefile.template'), 630 # pjoin(dir_path, 'Source', 'DHELAS', 'Makefile')) 631 # 632 633 #=========================================================================== 634 # generate_subprocess_directory 635 #===========================================================================
636 - def generate_subprocess_directory(self, matrix_element, 637 fortran_model, 638 me_number):
639 """Routine to generate a subprocess directory (for inheritance)""" 640 641 pass
642 643 #=========================================================================== 644 # get_source_libraries_list 645 #===========================================================================
646 - def get_source_libraries_list(self):
647 """ Returns the list of libraries to be compiling when compiling the 648 SOURCE directory. It is different for loop_induced processes and 649 also depends on the value of the 'output_dependencies' option""" 650 651 return ['$(LIBDIR)libdhelas.$(libext)', 652 '$(LIBDIR)libpdf.$(libext)', 653 '$(LIBDIR)libmodel.$(libext)', 654 '$(LIBDIR)libcernlib.$(libext)', 655 '$(LIBDIR)libbias.$(libext)']
656 657 #=========================================================================== 658 # write_source_makefile 659 #===========================================================================
660 - def write_source_makefile(self, writer):
661 """Write the nexternal.inc file for MG4""" 662 663 path = pjoin(_file_path,'iolibs','template_files','madevent_makefile_source') 664 set_of_lib = ' '.join(['$(LIBRARIES)']+self.get_source_libraries_list()) 665 if self.opt['model'] == 'mssm' or self.opt['model'].startswith('mssm-'): 666 model_line='''$(LIBDIR)libmodel.$(libext): MODEL param_card.inc\n\tcd MODEL; make 667 MODEL/MG5_param.dat: ../Cards/param_card.dat\n\t../bin/madevent treatcards param 668 param_card.inc: MODEL/MG5_param.dat\n\t../bin/madevent treatcards param\n''' 669 else: 670 model_line='''$(LIBDIR)libmodel.$(libext): MODEL param_card.inc\n\tcd MODEL; make 671 param_card.inc: ../Cards/param_card.dat\n\t../bin/madevent treatcards param\n''' 672 673 replace_dict= {'libraries': set_of_lib, 674 'model':model_line, 675 'additional_dsample': '', 676 'additional_dependencies':''} 677 678 if writer: 679 text = open(path).read() % replace_dict 680 writer.write(text) 681 682 return replace_dict
683 684 #=========================================================================== 685 # write_nexternal_madspin 686 #===========================================================================
687 - def write_nexternal_madspin(self, writer, nexternal, ninitial):
688 """Write the nexternal_prod.inc file for madspin""" 689 690 replace_dict = {} 691 692 replace_dict['nexternal'] = nexternal 693 replace_dict['ninitial'] = ninitial 694 695 file = """ \ 696 integer nexternal_prod 697 parameter (nexternal_prod=%(nexternal)d) 698 integer nincoming_prod 699 parameter (nincoming_prod=%(ninitial)d)""" % replace_dict 700 701 # Write the file 702 if writer: 703 writer.writelines(file) 704 return True 705 else: 706 return replace_dict
707 708 #=========================================================================== 709 # write_helamp_madspin 710 #===========================================================================
711 - def write_helamp_madspin(self, writer, ncomb):
712 """Write the helamp.inc file for madspin""" 713 714 replace_dict = {} 715 716 replace_dict['ncomb'] = ncomb 717 718 file = """ \ 719 integer ncomb1 720 parameter (ncomb1=%(ncomb)d) 721 double precision helamp(ncomb1) 722 common /to_helamp/helamp """ % replace_dict 723 724 # Write the file 725 if writer: 726 writer.writelines(file) 727 return True 728 else: 729 return replace_dict
730 731 732 733 #=========================================================================== 734 # write_nexternal_file 735 #===========================================================================
736 - def write_nexternal_file(self, writer, nexternal, ninitial):
737 """Write the nexternal.inc file for MG4""" 738 739 replace_dict = {} 740 741 replace_dict['nexternal'] = nexternal 742 replace_dict['ninitial'] = ninitial 743 744 file = """ \ 745 integer nexternal 746 parameter (nexternal=%(nexternal)d) 747 integer nincoming 748 parameter (nincoming=%(ninitial)d)""" % replace_dict 749 750 # Write the file 751 if writer: 752 writer.writelines(file) 753 return True 754 else: 755 return replace_dict
756 #=========================================================================== 757 # write_pmass_file 758 #===========================================================================
759 - def write_pmass_file(self, writer, matrix_element):
760 """Write the pmass.inc file for MG4""" 761 762 model = matrix_element.get('processes')[0].get('model') 763 764 lines = [] 765 for wf in matrix_element.get_external_wavefunctions(): 766 mass = model.get('particle_dict')[wf.get('pdg_code')].get('mass') 767 if mass.lower() != "zero": 768 mass = "abs(%s)" % mass 769 770 lines.append("pmass(%d)=%s" % \ 771 (wf.get('number_external'), mass)) 772 773 # Write the file 774 writer.writelines(lines) 775 776 return True
777 778 #=========================================================================== 779 # write_ngraphs_file 780 #===========================================================================
781 - def write_ngraphs_file(self, writer, nconfigs):
782 """Write the ngraphs.inc file for MG4. Needs input from 783 write_configs_file.""" 784 785 file = " integer n_max_cg\n" 786 file = file + "parameter (n_max_cg=%d)" % nconfigs 787 788 # Write the file 789 writer.writelines(file) 790 791 return True
792 793 #=========================================================================== 794 # write_leshouche_file 795 #===========================================================================
796 - def write_leshouche_file(self, writer, matrix_element):
797 """Write the leshouche.inc file for MG4""" 798 799 # Write the file 800 writer.writelines(self.get_leshouche_lines(matrix_element, 0)) 801 802 return True
803 804 #=========================================================================== 805 # get_leshouche_lines 806 #===========================================================================
807 - def get_leshouche_lines(self, matrix_element, numproc):
808 """Write the leshouche.inc file for MG4""" 809 810 # Extract number of external particles 811 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 812 813 lines = [] 814 for iproc, proc in enumerate(matrix_element.get('processes')): 815 legs = proc.get_legs_with_decays() 816 lines.append("DATA (IDUP(i,%d,%d),i=1,%d)/%s/" % \ 817 (iproc + 1, numproc+1, nexternal, 818 ",".join([str(l.get('id')) for l in legs]))) 819 if iproc == 0 and numproc == 0: 820 for i in [1, 2]: 821 lines.append("DATA (MOTHUP(%d,i),i=1,%2r)/%s/" % \ 822 (i, nexternal, 823 ",".join([ "%3r" % 0 ] * ninitial + \ 824 [ "%3r" % i ] * (nexternal - ninitial)))) 825 826 # Here goes the color connections corresponding to the JAMPs 827 # Only one output, for the first subproc! 828 if iproc == 0: 829 # If no color basis, just output trivial color flow 830 if not matrix_element.get('color_basis'): 831 for i in [1, 2]: 832 lines.append("DATA (ICOLUP(%d,i,1,%d),i=1,%2r)/%s/" % \ 833 (i, numproc+1,nexternal, 834 ",".join([ "%3r" % 0 ] * nexternal))) 835 836 else: 837 # First build a color representation dictionnary 838 repr_dict = {} 839 for l in legs: 840 repr_dict[l.get('number')] = \ 841 proc.get('model').get_particle(l.get('id')).get_color()\ 842 * (-1)**(1+l.get('state')) 843 # Get the list of color flows 844 color_flow_list = \ 845 matrix_element.get('color_basis').color_flow_decomposition(repr_dict, 846 ninitial) 847 # And output them properly 848 for cf_i, color_flow_dict in enumerate(color_flow_list): 849 for i in [0, 1]: 850 lines.append("DATA (ICOLUP(%d,i,%d,%d),i=1,%2r)/%s/" % \ 851 (i + 1, cf_i + 1, numproc+1, nexternal, 852 ",".join(["%3r" % color_flow_dict[l.get('number')][i] \ 853 for l in legs]))) 854 855 return lines
856 857 858 859 860 #=========================================================================== 861 # write_maxamps_file 862 #===========================================================================
863 - def write_maxamps_file(self, writer, maxamps, maxflows, 864 maxproc,maxsproc):
865 """Write the maxamps.inc file for MG4.""" 866 867 file = " integer maxamps, maxflow, maxproc, maxsproc\n" 868 file = file + "parameter (maxamps=%d, maxflow=%d)\n" % \ 869 (maxamps, maxflows) 870 file = file + "parameter (maxproc=%d, maxsproc=%d)" % \ 871 (maxproc, maxsproc) 872 873 # Write the file 874 writer.writelines(file) 875 876 return True
877 878 879 #=========================================================================== 880 # Routines to output UFO models in MG4 format 881 #=========================================================================== 882
883 - def convert_model(self, model, wanted_lorentz = [], 884 wanted_couplings = []):
885 """ Create a full valid MG4 model from a MG5 model (coming from UFO)""" 886 887 # Make sure aloha is in quadruple precision if needed 888 old_aloha_mp=aloha.mp_precision 889 aloha.mp_precision=self.opt['mp'] 890 self.model = model 891 # create the MODEL 892 write_dir=pjoin(self.dir_path, 'Source', 'MODEL') 893 self.opt['exporter'] = self.__class__ 894 model_builder = UFO_model_to_mg4(model, write_dir, self.opt + self.proc_characteristic) 895 model_builder.build(wanted_couplings) 896 897 # Backup the loop mode, because it can be changed in what follows. 898 old_loop_mode = aloha.loop_mode 899 900 # Create the aloha model or use the existing one (for loop exporters 901 # this is useful as the aloha model will be used again in the 902 # LoopHelasMatrixElements generated). We do not save the model generated 903 # here if it didn't exist already because it would be a waste of 904 # memory for tree level applications since aloha is only needed at the 905 # time of creating the aloha fortran subroutines. 906 if hasattr(self, 'aloha_model'): 907 aloha_model = self.aloha_model 908 else: 909 try: 910 with misc.MuteLogger(['madgraph.models'], [60]): 911 aloha_model = create_aloha.AbstractALOHAModel(os.path.basename(model.get('modelpath'))) 912 except (ImportError, UFOError): 913 aloha_model = create_aloha.AbstractALOHAModel(model.get('modelpath')) 914 aloha_model.add_Lorentz_object(model.get('lorentz')) 915 916 # Compute the subroutines 917 if wanted_lorentz: 918 aloha_model.compute_subset(wanted_lorentz) 919 else: 920 aloha_model.compute_all(save=False) 921 922 # Write them out 923 write_dir=pjoin(self.dir_path, 'Source', 'DHELAS') 924 aloha_model.write(write_dir, 'Fortran') 925 926 # Revert the original aloha loop mode 927 aloha.loop_mode = old_loop_mode 928 929 #copy Helas Template 930 cp(MG5DIR + '/aloha/template_files/Makefile_F', write_dir+'/makefile') 931 if any([any([tag.startswith('L') for tag in d[1]]) for d in wanted_lorentz]): 932 cp(MG5DIR + '/aloha/template_files/aloha_functions_loop.f', 933 write_dir+'/aloha_functions.f') 934 aloha_model.loop_mode = False 935 else: 936 cp(MG5DIR + '/aloha/template_files/aloha_functions.f', 937 write_dir+'/aloha_functions.f') 938 create_aloha.write_aloha_file_inc(write_dir, '.f', '.o') 939 940 # Make final link in the Process 941 self.make_model_symbolic_link() 942 943 # Re-establish original aloha mode 944 aloha.mp_precision=old_aloha_mp
945 946 947 #=========================================================================== 948 # Helper functions 949 #===========================================================================
950 - def modify_grouping(self, matrix_element):
951 """allow to modify the grouping (if grouping is in place) 952 return two value: 953 - True/False if the matrix_element was modified 954 - the new(or old) matrix element""" 955 956 return False, matrix_element
957 958 #=========================================================================== 959 # Helper functions 960 #===========================================================================
961 - def get_mg5_info_lines(self):
962 """Return info lines for MG5, suitable to place at beginning of 963 Fortran files""" 964 965 info = misc.get_pkg_info() 966 info_lines = "" 967 if info and 'version' in info and 'date' in info: 968 info_lines = "# Generated by MadGraph5_aMC@NLO v. %s, %s\n" % \ 969 (info['version'], info['date']) 970 info_lines = info_lines + \ 971 "# By the MadGraph5_aMC@NLO Development Team\n" + \ 972 "# Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch" 973 else: 974 info_lines = "# Generated by MadGraph5_aMC@NLO\n" + \ 975 "# By the MadGraph5_aMC@NLO Development Team\n" + \ 976 "# Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch" 977 978 return info_lines
979
980 - def get_process_info_lines(self, matrix_element):
981 """Return info lines describing the processes for this matrix element""" 982 983 return"\n".join([ "C " + process.nice_string().replace('\n', '\nC * ') \ 984 for process in matrix_element.get('processes')])
985 986
987 - def get_helicity_lines(self, matrix_element,array_name='NHEL'):
988 """Return the Helicity matrix definition lines for this matrix element""" 989 990 helicity_line_list = [] 991 i = 0 992 for helicities in matrix_element.get_helicity_matrix(): 993 i = i + 1 994 int_list = [i, len(helicities)] 995 int_list.extend(helicities) 996 helicity_line_list.append(\ 997 ("DATA ("+array_name+"(I,%4r),I=1,%d) /" + \ 998 ",".join(['%2r'] * len(helicities)) + "/") % tuple(int_list)) 999 1000 return "\n".join(helicity_line_list)
1001
1002 - def get_ic_line(self, matrix_element):
1003 """Return the IC definition line coming after helicities, required by 1004 switchmom in madevent""" 1005 1006 nexternal = matrix_element.get_nexternal_ninitial()[0] 1007 int_list = list(range(1, nexternal + 1)) 1008 1009 return "DATA (IC(I,1),I=1,%i) /%s/" % (nexternal, 1010 ",".join([str(i) for \ 1011 i in int_list]))
1012
1013 - def set_chosen_SO_index(self, process, squared_orders):
1014 """ From the squared order constraints set by the user, this function 1015 finds what indices of the squared_orders list the user intends to pick. 1016 It returns this as a string of comma-separated successive '.true.' or 1017 '.false.' for each index.""" 1018 1019 user_squared_orders = process.get('squared_orders') 1020 split_orders = process.get('split_orders') 1021 1022 if len(user_squared_orders)==0: 1023 return ','.join(['.true.']*len(squared_orders)) 1024 1025 res = [] 1026 for sqsos in squared_orders: 1027 is_a_match = True 1028 for user_sqso, value in user_squared_orders.items(): 1029 if user_sqso == 'WEIGHTED' : 1030 logger.debug('WEIGHTED^2%s%s encoutered. Please check behavior for' + \ 1031 'https://bazaar.launchpad.net/~maddevelopers/mg5amcnlo/3.0.1/revision/613', \ 1032 (process.get_squared_order_type(user_sqso), sqsos[split_orders.index(user_sqso)])) 1033 if user_sqso not in split_orders: 1034 is_a_match = False 1035 elif (process.get_squared_order_type(user_sqso) =='==' and \ 1036 value!=sqsos[split_orders.index(user_sqso)]) or \ 1037 (process.get_squared_order_type(user_sqso) in ['<=','='] and \ 1038 value<sqsos[split_orders.index(user_sqso)]) or \ 1039 (process.get_squared_order_type(user_sqso) == '>' and \ 1040 value>=sqsos[split_orders.index(user_sqso)]): 1041 is_a_match = False 1042 break 1043 res.append('.true.' if is_a_match else '.false.') 1044 1045 return ','.join(res)
1046
1047 - def get_split_orders_lines(self, orders, array_name, n=5):
1048 """ Return the split orders definition as defined in the list orders and 1049 for the name of the array 'array_name'. Split rows in chunks of size n.""" 1050 1051 ret_list = [] 1052 for index, order in enumerate(orders): 1053 for k in range(0, len(order), n): 1054 ret_list.append("DATA (%s(%3r,i),i=%3r,%3r) /%s/" % \ 1055 (array_name,index + 1, k + 1, min(k + n, len(order)), 1056 ','.join(["%5r" % i for i in order[k:k + n]]))) 1057 return ret_list
1058
1059 - def format_integer_list(self, list, name, n=5):
1060 """ Return an initialization of the python list in argument following 1061 the fortran syntax using the data keyword assignment, filling an array 1062 of name 'name'. It splits rows in chunks of size n.""" 1063 1064 ret_list = [] 1065 for k in range(0, len(list), n): 1066 ret_list.append("DATA (%s(i),i=%3r,%3r) /%s/" % \ 1067 (name, k + 1, min(k + n, len(list)), 1068 ','.join(["%5r" % i for i in list[k:k + n]]))) 1069 return ret_list
1070
1071 - def get_color_data_lines(self, matrix_element, n=6):
1072 """Return the color matrix definition lines for this matrix element. Split 1073 rows in chunks of size n.""" 1074 1075 if not matrix_element.get('color_matrix'): 1076 return ["DATA Denom(1)/1/", "DATA (CF(i,1),i=1,1) /1/"] 1077 else: 1078 ret_list = [] 1079 my_cs = color.ColorString() 1080 for index, denominator in \ 1081 enumerate(matrix_element.get('color_matrix').\ 1082 get_line_denominators()): 1083 # First write the common denominator for this color matrix line 1084 #ret_list.append("DATA Denom(%i)/%i/" % (index + 1, denominator)) 1085 # Then write the numerators for the matrix elements 1086 num_list = matrix_element.get('color_matrix').\ 1087 get_line_numerators(index, denominator) 1088 1089 assert all([int(i)==i for i in num_list]) 1090 1091 for k in range(0, len(num_list), n): 1092 ret_list.append("DATA (CF(i,%3r),i=%3r,%3r) /%s/" % \ 1093 (index + 1, k + 1, min(k + n, len(num_list)), 1094 ','.join([("%.15e" % (int(i)/denominator)).replace('e','d') for i in num_list[k:k + n]]))) 1095 1096 my_cs.from_immutable(sorted(matrix_element.get('color_basis').keys())[index]) 1097 ret_list.append("C %s" % repr(my_cs)) 1098 return ret_list
1099 1100
1101 - def get_den_factor_line(self, matrix_element):
1102 """Return the denominator factor line for this matrix element""" 1103 1104 return "DATA IDEN/%2r/" % \ 1105 matrix_element.get_denominator_factor()
1106
1107 - def get_icolamp_lines(self, mapconfigs, matrix_element, num_matrix_element):
1108 """Return the ICOLAMP matrix, showing which JAMPs contribute to 1109 which configs (diagrams).""" 1110 1111 ret_list = [] 1112 1113 booldict = {False: ".false.", True: ".true."} 1114 1115 if not matrix_element.get('color_basis'): 1116 # No color, so only one color factor. Simply write a ".true." 1117 # for each config (i.e., each diagram with only 3 particle 1118 # vertices 1119 configs = len(mapconfigs) 1120 ret_list.append("DATA(icolamp(1,i,%d),i=1,%d)/%s/" % \ 1121 (num_matrix_element, configs, 1122 ','.join([".true." for i in range(configs)]))) 1123 return ret_list 1124 1125 1126 # There is a color basis - create a list showing which JAMPs have 1127 # contributions to which configs 1128 1129 # Only want to include leading color flows, so find max_Nc 1130 color_basis = matrix_element.get('color_basis') 1131 1132 # We don't want to include the power of Nc's which come from the potential 1133 # loop color trace (i.e. in the case of a closed fermion loop for example) 1134 # so we subtract it here when computing max_Nc 1135 max_Nc = max(sum([[(v[4]-v[5]) for v in val] for val in 1136 color_basis.values()],[])) 1137 1138 # Crate dictionary between diagram number and JAMP number 1139 diag_jamp = {} 1140 for ijamp, col_basis_elem in \ 1141 enumerate(sorted(matrix_element.get('color_basis').keys())): 1142 for diag_tuple in matrix_element.get('color_basis')[col_basis_elem]: 1143 # Only use color flows with Nc == max_Nc. However, notice that 1144 # we don't want to include the Nc power coming from the loop 1145 # in this counting. 1146 if (diag_tuple[4]-diag_tuple[5]) == max_Nc: 1147 diag_num = diag_tuple[0] + 1 1148 # Add this JAMP number to this diag_num 1149 diag_jamp[diag_num] = diag_jamp.setdefault(diag_num, []) + \ 1150 [ijamp+1] 1151 else: 1152 self.proc_characteristic['single_color'] = False 1153 1154 colamps = ijamp + 1 1155 for iconfig, num_diag in enumerate(mapconfigs): 1156 if num_diag == 0: 1157 continue 1158 1159 # List of True or False 1160 bool_list = [(i + 1 in diag_jamp[num_diag]) for i in range(colamps)] 1161 # Add line 1162 ret_list.append("DATA(icolamp(i,%d,%d),i=1,%d)/%s/" % \ 1163 (iconfig+1, num_matrix_element, colamps, 1164 ','.join(["%s" % booldict[b] for b in \ 1165 bool_list]))) 1166 1167 return ret_list
1168
1169 - def get_amp2_lines(self, matrix_element, config_map = [], replace_dict=None):
1170 """Return the amp2(i) = sum(amp for diag(i))^2 lines""" 1171 1172 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 1173 # Get minimum legs in a vertex 1174 vert_list = [max(diag.get_vertex_leg_numbers()) for diag in \ 1175 matrix_element.get('diagrams') if diag.get_vertex_leg_numbers()!=[]] 1176 minvert = min(vert_list) if vert_list!=[] else 0 1177 1178 ret_lines = [] 1179 if config_map: 1180 # In this case, we need to sum up all amplitudes that have 1181 # identical topologies, as given by the config_map (which 1182 # gives the topology/config for each of the diagrams 1183 diagrams = matrix_element.get('diagrams') 1184 # Combine the diagrams with identical topologies 1185 config_to_diag_dict = {} 1186 for idiag, diag in enumerate(matrix_element.get('diagrams')): 1187 if config_map[idiag] == 0: 1188 continue 1189 try: 1190 config_to_diag_dict[config_map[idiag]].append(idiag) 1191 except KeyError: 1192 config_to_diag_dict[config_map[idiag]] = [idiag] 1193 # Write out the AMP2s summing squares of amplitudes belonging 1194 # to eiher the same diagram or different diagrams with 1195 # identical propagator properties. Note that we need to use 1196 # AMP2 number corresponding to the first diagram number used 1197 # for that AMP2. 1198 for config in sorted(config_to_diag_dict.keys()): 1199 1200 line = "AMP2(%(num)d)=AMP2(%(num)d)+" % \ 1201 {"num": (config_to_diag_dict[config][0] + 1)} 1202 1203 amp = "+".join(["AMP(%(num)d)" % {"num": a.get('number')} for a in \ 1204 sum([diagrams[idiag].get('amplitudes') for \ 1205 idiag in config_to_diag_dict[config]], [])]) 1206 1207 # Not using \sum |M|^2 anymore since this creates troubles 1208 # when ckm is not diagonal due to the JIM mechanism. 1209 if '+' in amp: 1210 amp = "(%s)*dconjg(%s)" % (amp, amp) 1211 else: 1212 amp = "%s*dconjg(%s)" % (amp, amp) 1213 1214 line = line + "%s" % (amp) 1215 #line += " * get_channel_cut(p, %s) " % (config) 1216 ret_lines.append(line) 1217 else: 1218 for idiag, diag in enumerate(matrix_element.get('diagrams')): 1219 # Ignore any diagrams with 4-particle vertices. 1220 if diag.get_vertex_leg_numbers()!=[] and max(diag.get_vertex_leg_numbers()) > minvert: 1221 continue 1222 # Now write out the expression for AMP2, meaning the sum of 1223 # squared amplitudes belonging to the same diagram 1224 line = "AMP2(%(num)d)=AMP2(%(num)d)+" % {"num": (idiag + 1)} 1225 line += "+".join(["AMP(%(num)d)*dconjg(AMP(%(num)d))" % \ 1226 {"num": a.get('number')} for a in \ 1227 diag.get('amplitudes')]) 1228 ret_lines.append(line) 1229 1230 return ret_lines
1231 1232 #=========================================================================== 1233 # Returns the data statements initializing the coeffictients for the JAMP 1234 # decomposition. It is used when the JAMP initialization is decided to be 1235 # done through big arrays containing the projection coefficients. 1236 #===========================================================================
1237 - def get_JAMP_coefs(self, color_amplitudes, color_basis=None, tag_letter="",\ 1238 n=50, Nc_value=3):
1239 """This functions return the lines defining the DATA statement setting 1240 the coefficients building the JAMPS out of the AMPS. Split rows in 1241 bunches of size n. 1242 One can specify the color_basis from which the color amplitudes originates 1243 so that there are commentaries telling what color structure each JAMP 1244 corresponds to.""" 1245 1246 if(not isinstance(color_amplitudes,list) or 1247 not (color_amplitudes and isinstance(color_amplitudes[0],list))): 1248 raise MadGraph5Error("Incorrect col_amps argument passed to get_JAMP_coefs") 1249 1250 res_list = [] 1251 my_cs = color.ColorString() 1252 for index, coeff_list in enumerate(color_amplitudes): 1253 # Create the list of the complete numerical coefficient. 1254 coefs_list=[coefficient[0][0]*coefficient[0][1]*\ 1255 (fractions.Fraction(Nc_value)**coefficient[0][3]) for \ 1256 coefficient in coeff_list] 1257 # Create the list of the numbers of the contributing amplitudes. 1258 # Mutliply by -1 for those which have an imaginary coefficient. 1259 ampnumbers_list=[coefficient[1]*(-1 if coefficient[0][2] else 1) \ 1260 for coefficient in coeff_list] 1261 # Find the common denominator. 1262 commondenom=abs(reduce(fractions.gcd, coefs_list).denominator) 1263 num_list=[(coefficient*commondenom).numerator \ 1264 for coefficient in coefs_list] 1265 res_list.append("DATA NCONTRIBAMPS%s(%i)/%i/"%(tag_letter,\ 1266 index+1,len(num_list))) 1267 res_list.append("DATA DENOMCCOEF%s(%i)/%i/"%(tag_letter,\ 1268 index+1,commondenom)) 1269 if color_basis: 1270 my_cs.from_immutable(sorted(color_basis.keys())[index]) 1271 res_list.append("C %s" % repr(my_cs)) 1272 for k in range(0, len(num_list), n): 1273 res_list.append("DATA (NUMCCOEF%s(%3r,i),i=%6r,%6r) /%s/" % \ 1274 (tag_letter,index + 1, k + 1, min(k + n, len(num_list)), 1275 ','.join(["%6r" % i for i in num_list[k:k + n]]))) 1276 res_list.append("DATA (AMPNUMBERS%s(%3r,i),i=%6r,%6r) /%s/" % \ 1277 (tag_letter,index + 1, k + 1, min(k + n, len(num_list)), 1278 ','.join(["%6r" % i for i in ampnumbers_list[k:k + n]]))) 1279 pass 1280 return res_list
1281 1282
1283 - def get_JAMP_lines_split_order(self, col_amps, split_order_amps, 1284 split_order_names=None, JAMP_format="JAMP(%s,{0})", AMP_format="AMP(%s)"):
1285 """Return the JAMP = sum(fermionfactor * AMP(i)) lines from col_amps 1286 defined as a matrix element or directly as a color_amplitudes dictionary. 1287 The split_order_amps specifies the group of amplitudes sharing the same 1288 amplitude orders which should be put in together in a given set of JAMPS. 1289 The split_order_amps is supposed to have the format of the second output 1290 of the function get_split_orders_mapping function in helas_objects.py. 1291 The split_order_names is optional (it should correspond to the process 1292 'split_orders' attribute) and only present to provide comments in the 1293 JAMP definitions in the code.""" 1294 1295 # Let the user call get_JAMP_lines_split_order directly from a 1296 error_msg="Malformed '%s' argument passed to the "+\ 1297 "get_JAMP_lines_split_order function: %s"%str(split_order_amps) 1298 if(isinstance(col_amps,helas_objects.HelasMatrixElement)): 1299 color_amplitudes=col_amps.get_color_amplitudes() 1300 elif(isinstance(col_amps,list)): 1301 if(col_amps and isinstance(col_amps[0],list)): 1302 color_amplitudes=col_amps 1303 else: 1304 raise MadGraph5Error(error_msg%'col_amps') 1305 else: 1306 raise MadGraph5Error(error_msg%'col_amps') 1307 1308 # Verify the sanity of the split_order_amps and split_order_names args 1309 if isinstance(split_order_amps,list): 1310 for elem in split_order_amps: 1311 if len(elem)!=2: 1312 raise MadGraph5Error(error_msg%'split_order_amps') 1313 # Check the first element of the two lists to make sure they are 1314 # integers, although in principle they should all be integers. 1315 if not isinstance(elem[0],tuple) or \ 1316 not isinstance(elem[1],tuple) or \ 1317 not isinstance(elem[0][0],int) or \ 1318 not isinstance(elem[1][0],int): 1319 raise MadGraph5Error(error_msg%'split_order_amps') 1320 else: 1321 raise MadGraph5Error(error_msg%'split_order_amps') 1322 1323 if not split_order_names is None: 1324 if isinstance(split_order_names,list): 1325 # Should specify the same number of names as there are elements 1326 # in the key of the split_order_amps. 1327 if len(split_order_names)!=len(split_order_amps[0][0]): 1328 raise MadGraph5Error(error_msg%'split_order_names') 1329 # Check the first element of the list to be a string 1330 if not isinstance(split_order_names[0],str): 1331 raise MadGraph5Error(error_msg%'split_order_names') 1332 else: 1333 raise MadGraph5Error(error_msg%'split_order_names') 1334 1335 # Now scan all contributing orders to be individually computed and 1336 # construct the list of color_amplitudes for JAMP to be constructed 1337 # accordingly. 1338 res_list=[] 1339 max_tmp = 0 1340 for i, amp_order in enumerate(split_order_amps): 1341 col_amps_order = [] 1342 for jamp in color_amplitudes: 1343 col_amps_order.append([col_amp for col_amp in jamp if col_amp[1] in amp_order[1]]) 1344 if split_order_names: 1345 res_list.append('C JAMPs contributing to orders '+' '.join( 1346 ['%s=%i'%order for order in zip(split_order_names, 1347 amp_order[0])])) 1348 if self.opt['export_format'] in ['madloop_matchbox']: 1349 res_list.extend(self.get_JAMP_lines(col_amps_order, 1350 JAMP_format=JAMP_format.format(str(i+1)), 1351 JAMP_formatLC="LN"+JAMP_format.format(str(i+1)))[0]) 1352 else: 1353 toadd, nb_tmp = self.get_JAMP_lines(col_amps_order, 1354 JAMP_format=JAMP_format.format(str(i+1))) 1355 res_list.extend(toadd) 1356 max_tmp = max(max_tmp, nb_tmp) 1357 1358 return res_list, max_tmp
1359 1360
1361 - def get_JAMP_lines(self, col_amps, JAMP_format="JAMP(%s)", AMP_format="AMP(%s)", 1362 split=-1):
1363 """Return the JAMP = sum(fermionfactor * AMP(i)) lines from col_amps 1364 defined as a matrix element or directly as a color_amplitudes dictionary, 1365 Jamp_formatLC should be define to allow to add LeadingColor computation 1366 (usefull for MatchBox) 1367 The split argument defines how the JAMP lines should be split in order 1368 not to be too long.""" 1369 1370 # Let the user call get_JAMP_lines directly from a MatrixElement or from 1371 # the color amplitudes lists. 1372 if(isinstance(col_amps,helas_objects.HelasMatrixElement)): 1373 color_amplitudes=col_amps.get_color_amplitudes() 1374 elif(isinstance(col_amps,list)): 1375 if(col_amps and isinstance(col_amps[0],list)): 1376 color_amplitudes=col_amps 1377 else: 1378 raise MadGraph5Error("Incorrect col_amps argument passed to get_JAMP_lines") 1379 else: 1380 raise MadGraph5Error("Incorrect col_amps argument passed to get_JAMP_lines") 1381 1382 all_element = {} 1383 res_list = [] 1384 for i, coeff_list in enumerate(color_amplitudes): 1385 # It might happen that coeff_list is empty if this function was 1386 # called from get_JAMP_lines_split_order (i.e. if some color flow 1387 # does not contribute at all for a given order). 1388 # In this case we simply set it to 0. 1389 if coeff_list==[]: 1390 res_list.append(((JAMP_format+"=0D0") % str(i + 1))) 1391 continue 1392 # Break the JAMP definition into 'n=split' pieces to avoid having 1393 # arbitrarly long lines. 1394 first=True 1395 n = (len(coeff_list)+1 if split<=0 else split) 1396 while coeff_list!=[]: 1397 coefs=coeff_list[:n] 1398 coeff_list=coeff_list[n:] 1399 res = ((JAMP_format+"=") % str(i + 1)) + \ 1400 ((JAMP_format % str(i + 1)) if not first and split>0 else '') 1401 1402 first=False 1403 # Optimization: if all contributions to that color basis element have 1404 # the same coefficient (up to a sign), put it in front 1405 list_fracs = [abs(coefficient[0][1]) for coefficient in coefs] 1406 common_factor = False 1407 diff_fracs = list(set(list_fracs)) 1408 if len(diff_fracs) == 1 and abs(diff_fracs[0]) != 1: 1409 common_factor = True 1410 global_factor = diff_fracs[0] 1411 res = res + '%s(' % self.coeff(1, global_factor, False, 0) 1412 1413 # loop for JAMP 1414 for (coefficient, amp_number) in coefs: 1415 if not coefficient: 1416 continue 1417 value = (1j if coefficient[2] else 1)* coefficient[0] * coefficient[1] * fractions.Fraction(3)**coefficient[3] 1418 if (i+1, amp_number) not in all_element: 1419 all_element[(i+1, amp_number)] = value 1420 else: 1421 all_element[(i+1, amp_number)] += value 1422 if common_factor: 1423 res = (res + "%s" + AMP_format) % \ 1424 (self.coeff(coefficient[0], 1425 coefficient[1] / abs(coefficient[1]), 1426 coefficient[2], 1427 coefficient[3]), 1428 str(amp_number)) 1429 else: 1430 res = (res + "%s" + AMP_format) % (self.coeff(coefficient[0], 1431 coefficient[1], 1432 coefficient[2], 1433 coefficient[3]), 1434 str(amp_number)) 1435 1436 if common_factor: 1437 res = res + ')' 1438 res_list.append(res) 1439 1440 if 'jamp_optim' in self.cmd_options: 1441 jamp_optim = banner_mod.ConfigFile.format_variable(self.cmd_options['jamp_optim'], bool, 'jamp_optim') 1442 else: 1443 # class default 1444 jamp_optim = self.jamp_optim 1445 1446 if not jamp_optim: 1447 return res_list, 0 1448 else: 1449 saved = list(res_list) 1450 1451 if len(all_element) > 1000: 1452 logger.info("Computing Color-Flow optimization [%s term]", len(all_element)) 1453 start_time = time.time() 1454 else: 1455 start_time = 0 1456 1457 res_list = [] 1458 #misc.sprint(len(all_element)) 1459 1460 self.myjamp_count = 0 1461 for key in all_element: 1462 all_element[key] = complex(all_element[key]) 1463 new_mat, defs = self.optimise_jamp(all_element) 1464 if start_time: 1465 logger.info("Color-Flow passed to %s term in %ss. Introduce %i contraction", len(new_mat), int(time.time()-start_time), len(defs)) 1466 1467 1468 #misc.sprint("number of iteration", self.myjamp_count) 1469 def format(frac): 1470 if isinstance(frac, Fraction): 1471 if frac.denominator == 1: 1472 return str(frac.numerator) 1473 else: 1474 return "%id0/%id0" % (frac.numerator, frac.denominator) 1475 elif frac.real == frac: 1476 #misc.sprint(frac.real, frac) 1477 return ('%.15e' % frac.real).replace('e','d') 1478 #str(float(frac.real)).replace('e','d') 1479 else: 1480 return ('(%.15e,%.15e)' % (frac.real, frac.imag)).replace('e','d')
1481 #str(frac).replace('e','d').replace('j','*imag1') 1482 1483 1484 1485 for i, amp1, amp2, frac, nb in defs: 1486 if amp1 > 0: 1487 amp1 = AMP_format % amp1 1488 else: 1489 amp1 = "TMP_JAMP(%d)" % -amp1 1490 if amp2 > 0: 1491 amp2 = AMP_format % amp2 1492 else: 1493 amp2 = "TMP_JAMP(%d)" % -amp2 1494 1495 if frac not in [1., -1]: 1496 res_list.append(' TMP_JAMP(%d) = %s + (%s) * %s ! used %d times' % (i,amp1, format(frac), amp2, nb)) 1497 elif frac == 1.: 1498 res_list.append(' TMP_JAMP(%d) = %s + %s ! used %d times' % (i,amp1, amp2, nb)) 1499 else: 1500 res_list.append(' TMP_JAMP(%d) = %s - %s ! used %d times' % (i,amp1, amp2, nb)) 1501 1502 1503 # misc.sprint(new_mat) 1504 jamp_res = collections.defaultdict(list) 1505 max_jamp=0 1506 for (jamp, var), factor in new_mat.items(): 1507 if var > 0: 1508 name = AMP_format % var 1509 else: 1510 name = "TMP_JAMP(%d)" % -var 1511 if factor not in [1.]: 1512 jamp_res[jamp].append("(%s)*%s" % (format(factor), name)) 1513 elif factor ==1: 1514 jamp_res[jamp].append("%s" % (name)) 1515 max_jamp = max(max_jamp, jamp) 1516 1517 1518 for i in range(1,max_jamp+1): 1519 name = JAMP_format % i 1520 if not jamp_res[i]: 1521 res_list.append(" %s = 0d0" %(name)) 1522 else: 1523 res_list.append(" %s = %s" %(name, '+'.join(jamp_res[i]))) 1524 1525 return res_list, len(defs)
1526
1527 - def optimise_jamp(self, all_element, nb_line=0, nb_col=0, added=0):
1528 """ optimise problem of type Y = A X 1529 A is a matrix (all_element) 1530 X is the fortran name of the input. 1531 The code iteratively add sub-expression jtemp[sub_add] 1532 and recall itself (this is add to the X size) 1533 """ 1534 self.myjamp_count +=1 1535 1536 if not nb_line: 1537 for i,j in all_element: 1538 if i+1 > nb_line: 1539 nb_line = i+1 1540 if j+1> nb_col: 1541 nb_col = j+1 1542 1543 max_count = 0 1544 all_index = [] 1545 operation = collections.defaultdict(lambda: collections.defaultdict(int)) 1546 for i in range(nb_line): 1547 for j1 in range(-added, nb_col): 1548 v1 = all_element.get((i,j1), 0) 1549 if not v1: 1550 continue 1551 for j2 in range(j1+1, nb_col): 1552 R = all_element.get((i,j2), 0)/v1 1553 if not R: 1554 continue 1555 1556 operation[(j1,j2)][R] +=1 1557 if operation[(j1,j2)][R] > max_count: 1558 max_count = operation[(j1,j2)][R] 1559 all_index = [(j1,j2, R)] 1560 elif operation[(j1,j2)][R] == max_count: 1561 all_index.append((j1,j2, R)) 1562 if max_count <= 1: 1563 return all_element, [] 1564 #added += 1 1565 #misc.sprint(max_count, len(all_index)) 1566 #misc.sprint(operation) 1567 to_add = [] 1568 for index in all_index: 1569 j1,j2,R = index 1570 first = True 1571 for i in range(nb_line): 1572 v1 = all_element.get((i,j1), 0) 1573 v2 = all_element.get((i,j2), 0) 1574 if not v1 or not v2: 1575 continue 1576 if v2/v1 == R: 1577 if first: 1578 first = False 1579 added +=1 1580 to_add.append((added,j1,j2,R, max_count)) 1581 1582 all_element[(i,-added)] = v1 1583 del all_element[(i,j1)] #= 0 1584 del all_element[(i,j2)] #= 0 1585 1586 logger.log(5,"Define %d new shortcut reused %d times", len(to_add), max_count) 1587 new_element, new_def = self.optimise_jamp(all_element, nb_line=nb_line, nb_col=nb_col, added=added) 1588 for one_def in to_add: 1589 new_def.insert(0, one_def) 1590 return new_element, new_def
1591 1592 1593 1594 1595
1596 - def get_pdf_lines(self, matrix_element, ninitial, subproc_group = False):
1597 """Generate the PDF lines for the auto_dsig.f file""" 1598 1599 processes = matrix_element.get('processes') 1600 model = processes[0].get('model') 1601 1602 pdf_definition_lines = "" 1603 pdf_data_lines = "" 1604 pdf_lines = "" 1605 1606 if ninitial == 1: 1607 pdf_lines = "PD(0) = 0d0\nIPROC = 0\n" 1608 for i, proc in enumerate(processes): 1609 process_line = proc.base_string() 1610 pdf_lines = pdf_lines + "IPROC=IPROC+1 ! " + process_line 1611 pdf_lines = pdf_lines + "\nPD(IPROC)=1d0\n" 1612 pdf_lines = pdf_lines + "\nPD(0)=PD(0)+PD(IPROC)\n" 1613 else: 1614 # Pick out all initial state particles for the two beams 1615 initial_states = [sorted(list(set([p.get_initial_pdg(1) for \ 1616 p in processes]))), 1617 sorted(list(set([p.get_initial_pdg(2) for \ 1618 p in processes])))] 1619 1620 # Prepare all variable names 1621 pdf_codes = dict([(p, model.get_particle(p).get_name()) for p in \ 1622 sum(initial_states,[])]) 1623 for key,val in pdf_codes.items(): 1624 pdf_codes[key] = val.replace('~','x').replace('+','p').replace('-','m') 1625 1626 # Set conversion from PDG code to number used in PDF calls 1627 pdgtopdf = {21: 0, 22: 7} 1628 1629 # Fill in missing entries of pdgtopdf 1630 for pdg in sum(initial_states,[]): 1631 if not pdg in pdgtopdf and not pdg in list(pdgtopdf.values()): 1632 pdgtopdf[pdg] = pdg 1633 elif pdg not in pdgtopdf and pdg in list(pdgtopdf.values()): 1634 # If any particle has pdg code 7, we need to use something else 1635 pdgtopdf[pdg] = 6000000 + pdg 1636 1637 # Get PDF variable declarations for all initial states 1638 for i in [0,1]: 1639 pdf_definition_lines += "DOUBLE PRECISION " + \ 1640 ",".join(["%s%d" % (pdf_codes[pdg],i+1) \ 1641 for pdg in \ 1642 initial_states[i]]) + \ 1643 "\n" 1644 1645 # Get PDF data lines for all initial states 1646 for i in [0,1]: 1647 pdf_data_lines += "DATA " + \ 1648 ",".join(["%s%d" % (pdf_codes[pdg],i+1) \ 1649 for pdg in initial_states[i]]) + \ 1650 "/%d*1D0/" % len(initial_states[i]) + \ 1651 "\n" 1652 1653 # Get PDF lines for all different initial states 1654 for i, init_states in enumerate(initial_states): 1655 if subproc_group: 1656 pdf_lines = pdf_lines + \ 1657 "IF (ABS(LPP(IB(%d))).GE.1) THEN\nLP=SIGN(1,LPP(IB(%d)))\n" \ 1658 % (i + 1, i + 1) 1659 else: 1660 pdf_lines = pdf_lines + \ 1661 "IF (ABS(LPP(%d)) .GE. 1) THEN\nLP=SIGN(1,LPP(%d))\n" \ 1662 % (i + 1, i + 1) 1663 1664 for nbi,initial_state in enumerate(init_states): 1665 if initial_state in list(pdf_codes.keys()): 1666 if subproc_group: 1667 pdf_lines = pdf_lines + \ 1668 ("%s%d=PDG2PDF(ABS(LPP(IB(%d))),%d*LP, 1," + \ 1669 "XBK(IB(%d)),DSQRT(Q2FACT(%d)))\n") % \ 1670 (pdf_codes[initial_state], 1671 i + 1, i + 1, pdgtopdf[initial_state], 1672 i + 1, i + 1) 1673 else: 1674 pdf_lines = pdf_lines + \ 1675 ("%s%d=PDG2PDF(ABS(LPP(%d)),%d*LP, %d," + \ 1676 "XBK(%d),DSQRT(Q2FACT(%d)))\n") % \ 1677 (pdf_codes[initial_state], 1678 i + 1, i + 1, pdgtopdf[initial_state], 1679 i + 1, 1680 i + 1, i + 1) 1681 pdf_lines = pdf_lines + "ENDIF\n" 1682 1683 # Add up PDFs for the different initial state particles 1684 pdf_lines = pdf_lines + "PD(0) = 0d0\nIPROC = 0\n" 1685 for proc in processes: 1686 process_line = proc.base_string() 1687 pdf_lines = pdf_lines + "IPROC=IPROC+1 ! " + process_line 1688 pdf_lines = pdf_lines + "\nPD(IPROC)=" 1689 for ibeam in [1, 2]: 1690 initial_state = proc.get_initial_pdg(ibeam) 1691 if initial_state in list(pdf_codes.keys()): 1692 pdf_lines = pdf_lines + "%s%d*" % \ 1693 (pdf_codes[initial_state], ibeam) 1694 else: 1695 pdf_lines = pdf_lines + "1d0*" 1696 # Remove last "*" from pdf_lines 1697 pdf_lines = pdf_lines[:-1] + "\n" 1698 pdf_lines = pdf_lines + "PD(0)=PD(0)+DABS(PD(IPROC))\n" 1699 1700 # Remove last line break from the return variables 1701 return pdf_definition_lines[:-1], pdf_data_lines[:-1], pdf_lines[:-1]
1702 1703 #=========================================================================== 1704 # write_props_file 1705 #===========================================================================
1706 - def write_props_file(self, writer, matrix_element, s_and_t_channels):
1707 """Write the props.inc file for MadEvent. Needs input from 1708 write_configs_file.""" 1709 1710 lines = [] 1711 1712 particle_dict = matrix_element.get('processes')[0].get('model').\ 1713 get('particle_dict') 1714 1715 for iconf, configs in enumerate(s_and_t_channels): 1716 for vertex in configs[0] + configs[1][:-1]: 1717 leg = vertex.get('legs')[-1] 1718 if leg.get('id') not in particle_dict: 1719 # Fake propagator used in multiparticle vertices 1720 mass = 'zero' 1721 width = 'zero' 1722 pow_part = 0 1723 else: 1724 particle = particle_dict[leg.get('id')] 1725 # Get mass 1726 if particle.get('mass').lower() == 'zero': 1727 mass = particle.get('mass') 1728 else: 1729 mass = "abs(%s)" % particle.get('mass') 1730 # Get width 1731 if particle.get('width').lower() == 'zero': 1732 width = particle.get('width') 1733 else: 1734 width = "abs(%s)" % particle.get('width') 1735 1736 pow_part = 1 + int(particle.is_boson()) 1737 1738 lines.append("prmass(%d,%d) = %s" % \ 1739 (leg.get('number'), iconf + 1, mass)) 1740 lines.append("prwidth(%d,%d) = %s" % \ 1741 (leg.get('number'), iconf + 1, width)) 1742 lines.append("pow(%d,%d) = %d" % \ 1743 (leg.get('number'), iconf + 1, pow_part)) 1744 1745 # Write the file 1746 writer.writelines(lines) 1747 1748 return True
1749 1750 #=========================================================================== 1751 # write_configs_file 1752 #===========================================================================
1753 - def write_configs_file(self, writer, matrix_element):
1754 """Write the configs.inc file for MadEvent""" 1755 1756 # Extract number of external particles 1757 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 1758 1759 configs = [(i+1, d) for i,d in enumerate(matrix_element.get('diagrams'))] 1760 mapconfigs = [c[0] for c in configs] 1761 model = matrix_element.get('processes')[0].get('model') 1762 return mapconfigs, self.write_configs_file_from_diagrams(writer, 1763 [[c[1]] for c in configs], 1764 mapconfigs, 1765 nexternal, ninitial, 1766 model)
1767 1768 #=========================================================================== 1769 # write_configs_file_from_diagrams 1770 #===========================================================================
1771 - def write_configs_file_from_diagrams(self, writer, configs, mapconfigs, 1772 nexternal, ninitial, model):
1773 """Write the actual configs.inc file. 1774 1775 configs is the diagrams corresponding to configs (each 1776 diagrams is a list of corresponding diagrams for all 1777 subprocesses, with None if there is no corresponding diagrams 1778 for a given process). 1779 mapconfigs gives the diagram number for each config. 1780 1781 For s-channels, we need to output one PDG for each subprocess in 1782 the subprocess group, in order to be able to pick the right 1783 one for multiprocesses.""" 1784 1785 lines = [] 1786 1787 s_and_t_channels = [] 1788 1789 vert_list = [max([d for d in config if d][0].get_vertex_leg_numbers()) \ 1790 for config in configs if [d for d in config if d][0].\ 1791 get_vertex_leg_numbers()!=[]] 1792 minvert = min(vert_list) if vert_list!=[] else 0 1793 1794 # Number of subprocesses 1795 nsubprocs = len(configs[0]) 1796 1797 nconfigs = 0 1798 1799 new_pdg = model.get_first_non_pdg() 1800 1801 for iconfig, helas_diags in enumerate(configs): 1802 if any(vert > minvert for vert in [d for d in helas_diags if d]\ 1803 [0].get_vertex_leg_numbers()) : 1804 # Only 3-vertices allowed in configs.inc except for vertices 1805 # which originate from a shrunk loop. 1806 continue 1807 nconfigs += 1 1808 1809 # Need s- and t-channels for all subprocesses, including 1810 # those that don't contribute to this config 1811 empty_verts = [] 1812 stchannels = [] 1813 for h in helas_diags: 1814 if h: 1815 # get_s_and_t_channels gives vertices starting from 1816 # final state external particles and working inwards 1817 stchannels.append(h.get('amplitudes')[0].\ 1818 get_s_and_t_channels(ninitial, model, new_pdg)) 1819 else: 1820 stchannels.append((empty_verts, None)) 1821 1822 # For t-channels, just need the first non-empty one 1823 tchannels = [t for s,t in stchannels if t != None][0] 1824 1825 # For s_and_t_channels (to be used later) use only first config 1826 s_and_t_channels.append([[s for s,t in stchannels if t != None][0], 1827 tchannels]) 1828 1829 # Make sure empty_verts is same length as real vertices 1830 if any([s for s,t in stchannels]): 1831 empty_verts[:] = [None]*max([len(s) for s,t in stchannels]) 1832 1833 # Reorganize s-channel vertices to get a list of all 1834 # subprocesses for each vertex 1835 schannels = list(zip(*[s for s,t in stchannels])) 1836 else: 1837 schannels = [] 1838 1839 allchannels = schannels 1840 if len(tchannels) > 1: 1841 # Write out tchannels only if there are any non-trivial ones 1842 allchannels = schannels + tchannels 1843 1844 # Write out propagators for s-channel and t-channel vertices 1845 1846 lines.append("# Diagram %d" % (mapconfigs[iconfig])) 1847 # Correspondance between the config and the diagram = amp2 1848 lines.append("data mapconfig(%d)/%d/" % (nconfigs, 1849 mapconfigs[iconfig])) 1850 1851 for verts in allchannels: 1852 if verts in schannels: 1853 vert = [v for v in verts if v][0] 1854 else: 1855 vert = verts 1856 daughters = [leg.get('number') for leg in vert.get('legs')[:-1]] 1857 last_leg = vert.get('legs')[-1] 1858 lines.append("data (iforest(i,%d,%d),i=1,%d)/%s/" % \ 1859 (last_leg.get('number'), nconfigs, len(daughters), 1860 ",".join([str(d) for d in daughters]))) 1861 if verts in schannels: 1862 pdgs = [] 1863 for v in verts: 1864 if v: 1865 pdgs.append(v.get('legs')[-1].get('id')) 1866 else: 1867 pdgs.append(0) 1868 lines.append("data (sprop(i,%d,%d),i=1,%d)/%s/" % \ 1869 (last_leg.get('number'), nconfigs, nsubprocs, 1870 ",".join([str(d) for d in pdgs]))) 1871 lines.append("data tprid(%d,%d)/0/" % \ 1872 (last_leg.get('number'), nconfigs)) 1873 elif verts in tchannels[:-1]: 1874 lines.append("data tprid(%d,%d)/%d/" % \ 1875 (last_leg.get('number'), nconfigs, 1876 abs(last_leg.get('id')))) 1877 lines.append("data (sprop(i,%d,%d),i=1,%d)/%s/" % \ 1878 (last_leg.get('number'), nconfigs, nsubprocs, 1879 ",".join(['0'] * nsubprocs))) 1880 1881 # Write out number of configs 1882 lines.append("# Number of configs") 1883 lines.append("data mapconfig(0)/%d/" % nconfigs) 1884 1885 # Write the file 1886 writer.writelines(lines) 1887 1888 return s_and_t_channels
1889 1890 #=========================================================================== 1891 # Global helper methods 1892 #=========================================================================== 1893
1894 - def coeff(self, ff_number, frac, is_imaginary, Nc_power, Nc_value=3):
1895 """Returns a nicely formatted string for the coefficients in JAMP lines""" 1896 1897 total_coeff = ff_number * frac * fractions.Fraction(Nc_value) ** Nc_power 1898 1899 if total_coeff == 1: 1900 if is_imaginary: 1901 return '+imag1*' 1902 else: 1903 return '+' 1904 elif total_coeff == -1: 1905 if is_imaginary: 1906 return '-imag1*' 1907 else: 1908 return '-' 1909 1910 res_str = '%+iD0' % total_coeff.numerator 1911 1912 if total_coeff.denominator != 1: 1913 # Check if total_coeff is an integer 1914 res_str = res_str + '/%iD0' % total_coeff.denominator 1915 1916 if is_imaginary: 1917 res_str = res_str + '*imag1' 1918 1919 return res_str + '*'
1920 1921
1922 - def set_fortran_compiler(self, default_compiler, force=False):
1923 """Set compiler based on what's available on the system""" 1924 1925 # Check for compiler 1926 if default_compiler['fortran'] and misc.which(default_compiler['fortran']): 1927 f77_compiler = default_compiler['fortran'] 1928 elif misc.which('gfortran'): 1929 f77_compiler = 'gfortran' 1930 elif misc.which('g77'): 1931 f77_compiler = 'g77' 1932 elif misc.which('f77'): 1933 f77_compiler = 'f77' 1934 elif default_compiler['fortran']: 1935 logger.warning('No Fortran Compiler detected! Please install one') 1936 f77_compiler = default_compiler['fortran'] # maybe misc fail so try with it 1937 else: 1938 raise MadGraph5Error('No Fortran Compiler detected! Please install one') 1939 logger.info('Use Fortran compiler ' + f77_compiler) 1940 1941 1942 # Check for compiler. 1. set default. 1943 if default_compiler['f2py']: 1944 f2py_compiler = default_compiler['f2py'] 1945 else: 1946 f2py_compiler = '' 1947 # Try to find the correct one. 1948 if default_compiler['f2py'] and misc.which(default_compiler['f2py']): 1949 f2py_compiler = default_compiler['f2py'] 1950 elif misc.which('f2py'): 1951 f2py_compiler = 'f2py' 1952 elif sys.version_info[1] == 6: 1953 if misc.which('f2py-2.6'): 1954 f2py_compiler = 'f2py-2.6' 1955 elif misc.which('f2py2.6'): 1956 f2py_compiler = 'f2py2.6' 1957 elif sys.version_info[1] == 7: 1958 if misc.which('f2py-2.7'): 1959 f2py_compiler = 'f2py-2.7' 1960 elif misc.which('f2py2.7'): 1961 f2py_compiler = 'f2py2.7' 1962 1963 to_replace = {'fortran': f77_compiler, 'f2py': f2py_compiler} 1964 1965 1966 self.replace_make_opt_f_compiler(to_replace) 1967 # Replace also for Template but not for cluster 1968 if 'MADGRAPH_DATA' not in os.environ and ReadWrite: 1969 self.replace_make_opt_f_compiler(to_replace, pjoin(MG5DIR, 'Template', 'LO')) 1970 1971 return f77_compiler
1972 1973 # an alias for backward compatibility 1974 set_compiler = set_fortran_compiler 1975 1976
1977 - def set_cpp_compiler(self, default_compiler, force=False):
1978 """Set compiler based on what's available on the system""" 1979 1980 # Check for compiler 1981 if default_compiler and misc.which(default_compiler): 1982 compiler = default_compiler 1983 elif misc.which('g++'): 1984 #check if clang version 1985 p = misc.Popen(['g++', '--version'], stdout=subprocess.PIPE, 1986 stderr=subprocess.PIPE) 1987 out, _ = p.communicate() 1988 out = out.decode() 1989 if 'clang' in str(out) and misc.which('clang'): 1990 compiler = 'clang' 1991 else: 1992 compiler = 'g++' 1993 elif misc.which('c++'): 1994 compiler = 'c++' 1995 elif misc.which('clang'): 1996 compiler = 'clang' 1997 elif default_compiler: 1998 logger.warning('No c++ Compiler detected! Please install one') 1999 compiler = default_compiler # maybe misc fail so try with it 2000 else: 2001 raise MadGraph5Error('No c++ Compiler detected! Please install one') 2002 logger.info('Use c++ compiler ' + compiler) 2003 self.replace_make_opt_c_compiler(compiler) 2004 # Replace also for Template but not for cluster 2005 if 'MADGRAPH_DATA' not in os.environ and ReadWrite and \ 2006 not __debug__ and not os.path.exists(pjoin(MG5DIR,'bin','create_release.py')): 2007 self.replace_make_opt_c_compiler(compiler, pjoin(MG5DIR, 'Template', 'LO')) 2008 2009 return compiler
2010 2011
2012 - def replace_make_opt_f_compiler(self, compilers, root_dir = ""):
2013 """Set FC=compiler in Source/make_opts""" 2014 2015 assert isinstance(compilers, dict) 2016 2017 mod = False #avoid to rewrite the file if not needed 2018 if not root_dir: 2019 root_dir = self.dir_path 2020 2021 compiler= compilers['fortran'] 2022 f2py_compiler = compilers['f2py'] 2023 if not f2py_compiler: 2024 f2py_compiler = 'f2py' 2025 for_update= {'DEFAULT_F_COMPILER':compiler, 2026 'DEFAULT_F2PY_COMPILER':f2py_compiler} 2027 make_opts = pjoin(root_dir, 'Source', 'make_opts') 2028 2029 try: 2030 common_run_interface.CommonRunCmd.update_make_opts_full( 2031 make_opts, for_update) 2032 except IOError: 2033 if root_dir == self.dir_path: 2034 logger.info('Fail to set compiler. Trying to continue anyway.')
2035
2036 - def replace_make_opt_c_compiler(self, compiler, root_dir = ""):
2037 """Set CXX=compiler in Source/make_opts. 2038 The version is also checked, in order to set some extra flags 2039 if the compiler is clang (on MACOS)""" 2040 2041 is_clang = misc.detect_if_cpp_compiler_is_clang(compiler) 2042 is_lc = misc.detect_cpp_std_lib_dependence(compiler) == '-lc++' 2043 2044 2045 # list of the variable to set in the make_opts file 2046 for_update= {'DEFAULT_CPP_COMPILER':compiler, 2047 'MACFLAG':'-mmacosx-version-min=10.7' if is_clang and is_lc else '', 2048 'STDLIB': '-lc++' if is_lc else '-lstdc++', 2049 'STDLIB_FLAG': '-stdlib=libc++' if is_lc and is_clang else '' 2050 } 2051 2052 # for MOJAVE remove the MACFLAG: 2053 if is_clang: 2054 import platform 2055 version, _, _ = platform.mac_ver() 2056 if not version:# not linux 2057 version = 14 # set version to remove MACFLAG 2058 else: 2059 majversion, version = [int(x) for x in version.split('.',3)[:2]] 2060 2061 if majversion >= 11 or (majversion ==10 and version >= 14): 2062 for_update['MACFLAG'] = '-mmacosx-version-min=10.8' if is_lc else '' 2063 2064 if not root_dir: 2065 root_dir = self.dir_path 2066 make_opts = pjoin(root_dir, 'Source', 'make_opts') 2067 2068 try: 2069 common_run_interface.CommonRunCmd.update_make_opts_full( 2070 make_opts, for_update) 2071 except IOError: 2072 if root_dir == self.dir_path: 2073 logger.info('Fail to set compiler. Trying to continue anyway.') 2074 2075 return
2076
2077 #=============================================================================== 2078 # ProcessExporterFortranSA 2079 #=============================================================================== 2080 -class ProcessExporterFortranSA(ProcessExporterFortran):
2081 """Class to take care of exporting a set of matrix elements to 2082 MadGraph v4 StandAlone format.""" 2083 2084 matrix_template = "matrix_standalone_v4.inc" 2085
2086 - def __init__(self, *args,**opts):
2087 """add the format information compare to standard init""" 2088 2089 if 'format' in opts: 2090 self.format = opts['format'] 2091 del opts['format'] 2092 else: 2093 self.format = 'standalone' 2094 2095 self.prefix_info = {} 2096 ProcessExporterFortran.__init__(self, *args, **opts)
2097
2098 - def copy_template(self, model):
2099 """Additional actions needed for setup of Template 2100 """ 2101 2102 #First copy the full template tree if dir_path doesn't exit 2103 if os.path.isdir(self.dir_path): 2104 return 2105 2106 logger.info('initialize a new standalone directory: %s' % \ 2107 os.path.basename(self.dir_path)) 2108 temp_dir = pjoin(self.mgme_dir, 'Template/LO') 2109 2110 # Create the directory structure 2111 os.mkdir(self.dir_path) 2112 os.mkdir(pjoin(self.dir_path, 'Source')) 2113 os.mkdir(pjoin(self.dir_path, 'Source', 'MODEL')) 2114 os.mkdir(pjoin(self.dir_path, 'Source', 'DHELAS')) 2115 os.mkdir(pjoin(self.dir_path, 'SubProcesses')) 2116 os.mkdir(pjoin(self.dir_path, 'bin')) 2117 os.mkdir(pjoin(self.dir_path, 'bin', 'internal')) 2118 os.mkdir(pjoin(self.dir_path, 'lib')) 2119 os.mkdir(pjoin(self.dir_path, 'Cards')) 2120 2121 # Information at top-level 2122 #Write version info 2123 shutil.copy(pjoin(temp_dir, 'TemplateVersion.txt'), self.dir_path) 2124 try: 2125 shutil.copy(pjoin(self.mgme_dir, 'MGMEVersion.txt'), self.dir_path) 2126 except IOError: 2127 MG5_version = misc.get_pkg_info() 2128 open(pjoin(self.dir_path, 'MGMEVersion.txt'), 'w').write( \ 2129 "5." + MG5_version['version']) 2130 2131 2132 # Add file in SubProcesses 2133 shutil.copy(pjoin(self.mgme_dir, 'madgraph', 'iolibs', 'template_files', 'makefile_sa_f_sp'), 2134 pjoin(self.dir_path, 'SubProcesses', 'makefileP')) 2135 2136 if self.format == 'standalone': 2137 shutil.copy(pjoin(self.mgme_dir, 'madgraph', 'iolibs', 'template_files', 'check_sa.f'), 2138 pjoin(self.dir_path, 'SubProcesses', 'check_sa.f')) 2139 2140 # Add file in Source 2141 shutil.copy(pjoin(temp_dir, 'Source', 'make_opts'), 2142 pjoin(self.dir_path, 'Source')) 2143 # add the makefile 2144 filename = pjoin(self.dir_path,'Source','makefile') 2145 self.write_source_makefile(writers.FileWriter(filename))
2146 2147 #=========================================================================== 2148 # export model files 2149 #===========================================================================
2150 - def export_model_files(self, model_path):
2151 """export the model dependent files for V4 model""" 2152 2153 super(ProcessExporterFortranSA,self).export_model_files(model_path) 2154 # Add the routine update_as_param in v4 model 2155 # This is a function created in the UFO 2156 text=""" 2157 subroutine update_as_param() 2158 call setpara('param_card.dat',.false.) 2159 return 2160 end 2161 """ 2162 ff = open(os.path.join(self.dir_path, 'Source', 'MODEL', 'couplings.f'),'a') 2163 ff.write(text) 2164 ff.close() 2165 2166 text = open(pjoin(self.dir_path,'SubProcesses','check_sa.f')).read() 2167 text = text.replace('call setpara(\'param_card.dat\')', 'call setpara(\'param_card.dat\', .true.)') 2168 fsock = open(pjoin(self.dir_path,'SubProcesses','check_sa.f'), 'w') 2169 fsock.write(text) 2170 fsock.close() 2171 2172 self.make_model_symbolic_link()
2173 2174 #=========================================================================== 2175 # write a procdef_mg5 (an equivalent of the MG4 proc_card.dat) 2176 #===========================================================================
2177 - def write_procdef_mg5(self, file_pos, modelname, process_str):
2178 """ write an equivalent of the MG4 proc_card in order that all the Madevent 2179 Perl script of MadEvent4 are still working properly for pure MG5 run. 2180 Not needed for StandAlone so just return 2181 """ 2182 2183 return
2184 2185 2186 #=========================================================================== 2187 # Make the Helas and Model directories for Standalone directory 2188 #===========================================================================
2189 - def make(self):
2190 """Run make in the DHELAS and MODEL directories, to set up 2191 everything for running standalone 2192 """ 2193 2194 source_dir = pjoin(self.dir_path, "Source") 2195 logger.info("Running make for Helas") 2196 misc.compile(arg=['../lib/libdhelas.a'], cwd=source_dir, mode='fortran') 2197 logger.info("Running make for Model") 2198 misc.compile(arg=['../lib/libmodel.a'], cwd=source_dir, mode='fortran')
2199 2200 #=========================================================================== 2201 # Create proc_card_mg5.dat for Standalone directory 2202 #===========================================================================
2203 - def finalize(self, matrix_elements, history, mg5options, flaglist):
2204 """Finalize Standalone MG4 directory by 2205 generation proc_card_mg5.dat 2206 generate a global makefile 2207 """ 2208 2209 compiler = {'fortran': mg5options['fortran_compiler'], 2210 'cpp': mg5options['cpp_compiler'], 2211 'f2py': mg5options['f2py_compiler']} 2212 2213 self.compiler_choice(compiler) 2214 self.make() 2215 2216 # Write command history as proc_card_mg5 2217 if history and os.path.isdir(pjoin(self.dir_path, 'Cards')): 2218 output_file = pjoin(self.dir_path, 'Cards', 'proc_card_mg5.dat') 2219 history.write(output_file) 2220 2221 ProcessExporterFortran.finalize(self, matrix_elements, 2222 history, mg5options, flaglist) 2223 open(pjoin(self.dir_path,'__init__.py'),'w') 2224 open(pjoin(self.dir_path,'SubProcesses','__init__.py'),'w') 2225 2226 if False:#'mode' in self.opt and self.opt['mode'] == "reweight": 2227 #add the module to hande the NLO weight 2228 files.copytree(pjoin(MG5DIR, 'Template', 'RWGTNLO'), 2229 pjoin(self.dir_path, 'Source')) 2230 files.copytree(pjoin(MG5DIR, 'Template', 'NLO', 'Source', 'PDF'), 2231 pjoin(self.dir_path, 'Source', 'PDF')) 2232 self.write_pdf_opendata() 2233 2234 if self.prefix_info: 2235 self.write_f2py_splitter() 2236 self.write_f2py_makefile() 2237 self.write_f2py_check_sa(matrix_elements, 2238 pjoin(self.dir_path,'SubProcesses','check_sa.py')) 2239 else: 2240 # create a single makefile to compile all the subprocesses 2241 text = '''\n# For python linking (require f2py part of numpy)\nifeq ($(origin MENUM),undefined)\n MENUM=2\nendif\n''' 2242 deppython = '' 2243 for Pdir in os.listdir(pjoin(self.dir_path,'SubProcesses')): 2244 if os.path.isdir(pjoin(self.dir_path, 'SubProcesses', Pdir)): 2245 text += '%(0)s/matrix$(MENUM)py.so:\n\tcd %(0)s;make matrix$(MENUM)py.so\n'% {'0': Pdir} 2246 deppython += ' %(0)s/matrix$(MENUM)py.so ' % {'0': Pdir} 2247 text+='all: %s\n\techo \'done\'' % deppython 2248 2249 ff = open(pjoin(self.dir_path, 'SubProcesses', 'makefile'),'a') 2250 ff.write(text) 2251 ff.close()
2252
2253 - def write_f2py_splitter(self):
2254 """write a function to call the correct matrix element""" 2255 2256 template = """ 2257 %(python_information)s 2258 subroutine smatrixhel(pdgs, procid, npdg, p, ALPHAS, SCALE2, nhel, ANS) 2259 IMPLICIT NONE 2260 C ALPHAS is given at scale2 (SHOULD be different of 0 for loop induced, ignore for LO) 2261 2262 CF2PY double precision, intent(in), dimension(0:3,npdg) :: p 2263 CF2PY integer, intent(in), dimension(npdg) :: pdgs 2264 CF2PY integer, intent(in):: procid 2265 CF2PY integer, intent(in) :: npdg 2266 CF2PY double precision, intent(out) :: ANS 2267 CF2PY double precision, intent(in) :: ALPHAS 2268 CF2PY double precision, intent(in) :: SCALE2 2269 integer pdgs(*) 2270 integer npdg, nhel, procid 2271 double precision p(*) 2272 double precision ANS, ALPHAS, PI,SCALE2 2273 include 'coupl.inc' 2274 2275 PI = 3.141592653589793D0 2276 G = 2* DSQRT(ALPHAS*PI) 2277 CALL UPDATE_AS_PARAM() 2278 c if (scale2.ne.0d0) stop 1 2279 2280 %(smatrixhel)s 2281 2282 return 2283 end 2284 2285 SUBROUTINE INITIALISE(PATH) 2286 C ROUTINE FOR F2PY to read the benchmark point. 2287 IMPLICIT NONE 2288 CHARACTER*512 PATH 2289 CF2PY INTENT(IN) :: PATH 2290 CALL SETPARA(PATH) !first call to setup the paramaters 2291 RETURN 2292 END 2293 2294 2295 subroutine CHANGE_PARA(name, value) 2296 implicit none 2297 CF2PY intent(in) :: name 2298 CF2PY intent(in) :: value 2299 2300 character*512 name 2301 double precision value 2302 2303 %(helreset_def)s 2304 2305 include '../Source/MODEL/input.inc' 2306 include '../Source/MODEL/coupl.inc' 2307 2308 %(helreset_setup)s 2309 2310 SELECT CASE (name) 2311 %(parameter_setup)s 2312 CASE DEFAULT 2313 write(*,*) 'no parameter matching', name, value 2314 END SELECT 2315 2316 return 2317 end 2318 2319 subroutine update_all_coup() 2320 implicit none 2321 call coup() 2322 return 2323 end 2324 2325 2326 subroutine get_pdg_order(PDG, ALLPROC) 2327 IMPLICIT NONE 2328 CF2PY INTEGER, intent(out) :: PDG(%(nb_me)i,%(maxpart)i) 2329 CF2PY INTEGER, intent(out) :: ALLPROC(%(nb_me)i) 2330 INTEGER PDG(%(nb_me)i,%(maxpart)i), PDGS(%(nb_me)i,%(maxpart)i) 2331 INTEGER ALLPROC(%(nb_me)i),PIDs(%(nb_me)i) 2332 DATA PDGS/ %(pdgs)s / 2333 DATA PIDS/ %(pids)s / 2334 PDG = PDGS 2335 ALLPROC = PIDS 2336 RETURN 2337 END 2338 2339 subroutine get_prefix(PREFIX) 2340 IMPLICIT NONE 2341 CF2PY CHARACTER*20, intent(out) :: PREFIX(%(nb_me)i) 2342 character*20 PREFIX(%(nb_me)i),PREF(%(nb_me)i) 2343 DATA PREF / '%(prefix)s'/ 2344 PREFIX = PREF 2345 RETURN 2346 END 2347 2348 2349 """ 2350 2351 allids = list(self.prefix_info.keys()) 2352 allprefix = [self.prefix_info[key][0] for key in allids] 2353 min_nexternal = min([len(ids[0]) for ids in allids]) 2354 max_nexternal = max([len(ids[0]) for ids in allids]) 2355 2356 info = [] 2357 for (key, pid), (prefix, tag) in self.prefix_info.items(): 2358 info.append('#PY %s : %s # %s %s' % (tag, key, prefix, pid)) 2359 2360 2361 text = [] 2362 for n_ext in range(min_nexternal, max_nexternal+1): 2363 current_id = [ids[0] for ids in allids if len(ids[0])==n_ext] 2364 current_pid = [ids[1] for ids in allids if len(ids[0])==n_ext] 2365 if not current_id: 2366 continue 2367 if min_nexternal != max_nexternal: 2368 if n_ext == min_nexternal: 2369 text.append(' if (npdg.eq.%i)then' % n_ext) 2370 else: 2371 text.append(' else if (npdg.eq.%i)then' % n_ext) 2372 for ii,pdgs in enumerate(current_id): 2373 pid = current_pid[ii] 2374 condition = '.and.'.join(['%i.eq.pdgs(%i)' %(pdg, i+1) for i, pdg in enumerate(pdgs)]) 2375 if ii==0: 2376 text.append( ' if(%s.and.(procid.le.0.or.procid.eq.%d)) then ! %i' % (condition, pid, ii)) 2377 else: 2378 text.append( ' else if(%s.and.(procid.le.0.or.procid.eq.%d)) then ! %i' % (condition,pid,ii)) 2379 text.append(' call %ssmatrixhel(p, nhel, ans)' % self.prefix_info[(pdgs,pid)][0]) 2380 text.append(' endif') 2381 #close the function 2382 if min_nexternal != max_nexternal: 2383 text.append('endif') 2384 2385 params = self.get_model_parameter(self.model) 2386 parameter_setup =[] 2387 for key, var in params.items(): 2388 parameter_setup.append(' CASE ("%s")\n %s = value' 2389 % (key, var)) 2390 2391 # part for the resetting of the helicity 2392 helreset_def = [] 2393 helreset_setup = [] 2394 for prefix in set(allprefix): 2395 helreset_setup.append(' %shelreset = .true. ' % prefix) 2396 helreset_def.append(' logical %shelreset \n common /%shelreset/ %shelreset' % (prefix, prefix, prefix)) 2397 2398 2399 formatting = {'python_information':'\n'.join(info), 2400 'smatrixhel': '\n'.join(text), 2401 'maxpart': max_nexternal, 2402 'nb_me': len(allids), 2403 'pdgs': ','.join(str(pdg[i]) if i<len(pdg) else '0' 2404 for i in range(max_nexternal) for (pdg,pid) in allids), 2405 'prefix':'\',\''.join(allprefix), 2406 'pids': ','.join(str(pid) for (pdg,pid) in allids), 2407 'parameter_setup': '\n'.join(parameter_setup), 2408 'helreset_def' : '\n'.join(helreset_def), 2409 'helreset_setup' : '\n'.join(helreset_setup), 2410 } 2411 formatting['lenprefix'] = len(formatting['prefix']) 2412 text = template % formatting 2413 fsock = writers.FortranWriter(pjoin(self.dir_path, 'SubProcesses', 'all_matrix.f'),'w') 2414 fsock.writelines(text) 2415 fsock.close()
2416
2417 - def get_model_parameter(self, model):
2418 """ returns all the model parameter 2419 """ 2420 params = {} 2421 for p in model.get('parameters')[('external',)]: 2422 name = p.name 2423 nopref = name[4:] if name.startswith('mdl_') else name 2424 params[nopref] = name 2425 2426 block = p.lhablock 2427 lha = '_'.join([str(i) for i in p.lhacode]) 2428 params['%s_%s' % (block.upper(), lha)] = name 2429 2430 return params
2431 2432 2433 2434 2435
2436 - def write_f2py_check_sa(self, matrix_element, writer):
2437 """ Write the general check_sa.py in SubProcesses that calls all processes successively.""" 2438 # To be implemented. It is just an example file, i.e. not crucial. 2439 return
2440
2441 - def write_f2py_makefile(self):
2442 """ """ 2443 # Add file in SubProcesses 2444 shutil.copy(pjoin(self.mgme_dir, 'madgraph', 'iolibs', 'template_files', 'makefile_sa_f2py'), 2445 pjoin(self.dir_path, 'SubProcesses', 'makefile'))
2446
2447 - def create_MA5_cards(self,*args,**opts):
2448 """ Overload the function of the mother so as to bypass this in StandAlone.""" 2449 pass
2450
2451 - def compiler_choice(self, compiler):
2452 """ Different daughter classes might want different compilers. 2453 So this function is meant to be overloaded if desired.""" 2454 2455 self.set_compiler(compiler)
2456 2457 #=========================================================================== 2458 # generate_subprocess_directory 2459 #===========================================================================
2460 - def generate_subprocess_directory(self, matrix_element, 2461 fortran_model, number):
2462 """Generate the Pxxxxx directory for a subprocess in MG4 standalone, 2463 including the necessary matrix.f and nexternal.inc files""" 2464 2465 cwd = os.getcwd() 2466 # Create the directory PN_xx_xxxxx in the specified path 2467 dirpath = pjoin(self.dir_path, 'SubProcesses', \ 2468 "P%s" % matrix_element.get('processes')[0].shell_string()) 2469 2470 if self.opt['sa_symmetry']: 2471 # avoid symmetric output 2472 for i,proc in enumerate(matrix_element.get('processes')): 2473 2474 tag = proc.get_tag() 2475 legs = proc.get('legs')[:] 2476 leg0 = proc.get('legs')[0] 2477 leg1 = proc.get('legs')[1] 2478 if not leg1.get('state'): 2479 proc.get('legs')[0] = leg1 2480 proc.get('legs')[1] = leg0 2481 flegs = proc.get('legs')[2:] 2482 for perm in itertools.permutations(flegs): 2483 for i,p in enumerate(perm): 2484 proc.get('legs')[i+2] = p 2485 dirpath2 = pjoin(self.dir_path, 'SubProcesses', \ 2486 "P%s" % proc.shell_string()) 2487 #restore original order 2488 proc.get('legs')[2:] = legs[2:] 2489 if os.path.exists(dirpath2): 2490 proc.get('legs')[:] = legs 2491 return 0 2492 proc.get('legs')[:] = legs 2493 2494 try: 2495 os.mkdir(dirpath) 2496 except os.error as error: 2497 logger.warning(error.strerror + " " + dirpath) 2498 2499 #try: 2500 # os.chdir(dirpath) 2501 #except os.error: 2502 # logger.error('Could not cd to directory %s' % dirpath) 2503 # return 0 2504 2505 logger.info('Creating files in directory %s' % dirpath) 2506 2507 # Extract number of external particles 2508 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 2509 2510 # Create the matrix.f file and the nexternal.inc file 2511 if self.opt['export_format']=='standalone_msP': 2512 filename = pjoin(dirpath, 'matrix_prod.f') 2513 else: 2514 filename = pjoin(dirpath, 'matrix.f') 2515 2516 proc_prefix = '' 2517 if 'prefix' in self.cmd_options: 2518 if self.cmd_options['prefix'] == 'int': 2519 proc_prefix = 'M%s_' % number 2520 elif self.cmd_options['prefix'] == 'proc': 2521 proc_prefix = matrix_element.get('processes')[0].shell_string().split('_',1)[1] 2522 else: 2523 raise Exception('--prefix options supports only \'int\' and \'proc\'') 2524 for proc in matrix_element.get('processes'): 2525 ids = [l.get('id') for l in proc.get('legs_with_decays')] 2526 self.prefix_info[(tuple(ids), proc.get('id'))] = [proc_prefix, proc.get_tag()] 2527 2528 calls = self.write_matrix_element_v4( 2529 writers.FortranWriter(filename), 2530 matrix_element, 2531 fortran_model, 2532 proc_prefix=proc_prefix) 2533 2534 if self.opt['export_format'] == 'standalone_msP': 2535 filename = pjoin(dirpath,'configs_production.inc') 2536 mapconfigs, s_and_t_channels = self.write_configs_file(\ 2537 writers.FortranWriter(filename), 2538 matrix_element) 2539 2540 filename = pjoin(dirpath,'props_production.inc') 2541 self.write_props_file(writers.FortranWriter(filename), 2542 matrix_element, 2543 s_and_t_channels) 2544 2545 filename = pjoin(dirpath,'nexternal_prod.inc') 2546 self.write_nexternal_madspin(writers.FortranWriter(filename), 2547 nexternal, ninitial) 2548 2549 if self.opt['export_format']=='standalone_msF': 2550 filename = pjoin(dirpath, 'helamp.inc') 2551 ncomb=matrix_element.get_helicity_combinations() 2552 self.write_helamp_madspin(writers.FortranWriter(filename), 2553 ncomb) 2554 2555 filename = pjoin(dirpath, 'nexternal.inc') 2556 self.write_nexternal_file(writers.FortranWriter(filename), 2557 nexternal, ninitial) 2558 2559 filename = pjoin(dirpath, 'pmass.inc') 2560 self.write_pmass_file(writers.FortranWriter(filename), 2561 matrix_element) 2562 2563 filename = pjoin(dirpath, 'ngraphs.inc') 2564 self.write_ngraphs_file(writers.FortranWriter(filename), 2565 len(matrix_element.get_all_amplitudes())) 2566 2567 # Generate diagrams 2568 if not 'noeps' in self.opt['output_options'] or self.opt['output_options']['noeps'] != 'True': 2569 filename = pjoin(dirpath, "matrix.ps") 2570 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\ 2571 get('diagrams'), 2572 filename, 2573 model=matrix_element.get('processes')[0].\ 2574 get('model'), 2575 amplitude=True) 2576 logger.info("Generating Feynman diagrams for " + \ 2577 matrix_element.get('processes')[0].nice_string()) 2578 plot.draw() 2579 2580 linkfiles = ['check_sa.f', 'coupl.inc'] 2581 2582 if proc_prefix and os.path.exists(pjoin(dirpath, '..', 'check_sa.f')): 2583 text = open(pjoin(dirpath, '..', 'check_sa.f')).read() 2584 pat = re.compile('smatrix', re.I) 2585 new_text, n = re.subn(pat, '%ssmatrix' % proc_prefix, text) 2586 with open(pjoin(dirpath, 'check_sa.f'),'w') as f: 2587 f.write(new_text) 2588 linkfiles.pop(0) 2589 2590 for file in linkfiles: 2591 ln('../%s' % file, cwd=dirpath) 2592 ln('../makefileP', name='makefile', cwd=dirpath) 2593 # Return to original PWD 2594 #os.chdir(cwd) 2595 2596 if not calls: 2597 calls = 0 2598 return calls
2599 2600 2601 #=========================================================================== 2602 # write_source_makefile 2603 #===========================================================================
2604 - def write_source_makefile(self, writer):
2605 """Write the nexternal.inc file for MG4""" 2606 2607 path = pjoin(_file_path,'iolibs','template_files','madevent_makefile_source') 2608 set_of_lib = '$(LIBDIR)libdhelas.$(libext) $(LIBDIR)libmodel.$(libext)' 2609 model_line='''$(LIBDIR)libmodel.$(libext): MODEL\n\t cd MODEL; make\n''' 2610 2611 replace_dict= {'libraries': set_of_lib, 2612 'model':model_line, 2613 'additional_dsample': '', 2614 'additional_dependencies':''} 2615 2616 text = open(path).read() % replace_dict 2617 2618 if writer: 2619 writer.write(text) 2620 2621 return replace_dict
2622 2623 #=========================================================================== 2624 # write_matrix_element_v4 2625 #===========================================================================
2626 - def write_matrix_element_v4(self, writer, matrix_element, fortran_model, 2627 write=True, proc_prefix=''):
2628 """Export a matrix element to a matrix.f file in MG4 standalone format 2629 if write is on False, just return the replace_dict and not write anything.""" 2630 2631 2632 if not matrix_element.get('processes') or \ 2633 not matrix_element.get('diagrams'): 2634 return 0 2635 2636 if writer: 2637 if not isinstance(writer, writers.FortranWriter): 2638 raise writers.FortranWriter.FortranWriterError(\ 2639 "writer not FortranWriter but %s" % type(writer)) 2640 # Set lowercase/uppercase Fortran code 2641 writers.FortranWriter.downcase = False 2642 2643 2644 if 'sa_symmetry' not in self.opt: 2645 self.opt['sa_symmetry']=False 2646 2647 2648 # The proc_id is for MadEvent grouping which is never used in SA. 2649 replace_dict = {'global_variable':'', 'amp2_lines':'', 2650 'proc_prefix':proc_prefix, 'proc_id':''} 2651 2652 # Extract helas calls 2653 helas_calls = fortran_model.get_matrix_element_calls(\ 2654 matrix_element) 2655 2656 replace_dict['helas_calls'] = "\n".join(helas_calls) 2657 2658 # Extract version number and date from VERSION file 2659 info_lines = self.get_mg5_info_lines() 2660 replace_dict['info_lines'] = info_lines 2661 2662 # Extract process info lines 2663 process_lines = self.get_process_info_lines(matrix_element) 2664 replace_dict['process_lines'] = process_lines 2665 2666 # Extract number of external particles 2667 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 2668 replace_dict['nexternal'] = nexternal 2669 replace_dict['nincoming'] = ninitial 2670 2671 # Extract ncomb 2672 ncomb = matrix_element.get_helicity_combinations() 2673 replace_dict['ncomb'] = ncomb 2674 2675 # Extract helicity lines 2676 helicity_lines = self.get_helicity_lines(matrix_element) 2677 replace_dict['helicity_lines'] = helicity_lines 2678 2679 # Extract overall denominator 2680 # Averaging initial state color, spin, and identical FS particles 2681 replace_dict['den_factor_line'] = self.get_den_factor_line(matrix_element) 2682 2683 # Extract ngraphs 2684 ngraphs = matrix_element.get_number_of_amplitudes() 2685 replace_dict['ngraphs'] = ngraphs 2686 2687 # Extract nwavefuncs 2688 nwavefuncs = matrix_element.get_number_of_wavefunctions() 2689 replace_dict['nwavefuncs'] = nwavefuncs 2690 2691 # Extract ncolor 2692 ncolor = max(1, len(matrix_element.get('color_basis'))) 2693 replace_dict['ncolor'] = ncolor 2694 2695 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 2696 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\ 2697 matrix_element.get_beams_hel_avg_factor() 2698 2699 # Extract color data lines 2700 color_data_lines = self.get_color_data_lines(matrix_element) 2701 replace_dict['color_data_lines'] = "\n".join(color_data_lines) 2702 2703 if self.opt['export_format']=='standalone_msP': 2704 # For MadSpin need to return the AMP2 2705 amp2_lines = self.get_amp2_lines(matrix_element, [] ) 2706 replace_dict['amp2_lines'] = '\n'.join(amp2_lines) 2707 replace_dict['global_variable'] = \ 2708 " Double Precision amp2(NGRAPHS)\n common/to_amps/ amp2\n" 2709 2710 # JAMP definition, depends on the number of independent split orders 2711 split_orders=matrix_element.get('processes')[0].get('split_orders') 2712 2713 if len(split_orders)==0: 2714 replace_dict['nSplitOrders']='' 2715 # Extract JAMP lines 2716 jamp_lines, nb_tmp_jamp = self.get_JAMP_lines(matrix_element) 2717 # Consider the output of a dummy order 'ALL_ORDERS' for which we 2718 # set all amplitude order to weight 1 and only one squared order 2719 # contribution which is of course ALL_ORDERS=2. 2720 squared_orders = [(2,),] 2721 amp_orders = [((1,),tuple(range(1,ngraphs+1)))] 2722 replace_dict['chosen_so_configs'] = '.TRUE.' 2723 replace_dict['nSqAmpSplitOrders']=1 2724 replace_dict['split_order_str_list']='' 2725 replace_dict['nb_temp_jamp'] = nb_tmp_jamp 2726 2727 else: 2728 squared_orders, amp_orders = matrix_element.get_split_orders_mapping() 2729 replace_dict['nAmpSplitOrders']=len(amp_orders) 2730 replace_dict['nSqAmpSplitOrders']=len(squared_orders) 2731 replace_dict['nSplitOrders']=len(split_orders) 2732 replace_dict['split_order_str_list']=str(split_orders) 2733 amp_so = self.get_split_orders_lines( 2734 [amp_order[0] for amp_order in amp_orders],'AMPSPLITORDERS') 2735 sqamp_so = self.get_split_orders_lines(squared_orders,'SQSPLITORDERS') 2736 replace_dict['ampsplitorders']='\n'.join(amp_so) 2737 replace_dict['sqsplitorders']='\n'.join(sqamp_so) 2738 jamp_lines, nb_tmp_jamp = self.get_JAMP_lines_split_order(\ 2739 matrix_element,amp_orders,split_order_names=split_orders) 2740 replace_dict['nb_temp_jamp'] = nb_tmp_jamp 2741 # Now setup the array specifying what squared split order is chosen 2742 replace_dict['chosen_so_configs']=self.set_chosen_SO_index( 2743 matrix_element.get('processes')[0],squared_orders) 2744 2745 # For convenience we also write the driver check_sa_splitOrders.f 2746 # that explicitely writes out the contribution from each squared order. 2747 # The original driver still works and is compiled with 'make' while 2748 # the splitOrders one is compiled with 'make check_sa_born_splitOrders' 2749 check_sa_writer=writers.FortranWriter('check_sa_born_splitOrders.f') 2750 self.write_check_sa_splitOrders(squared_orders,split_orders, 2751 nexternal,ninitial,proc_prefix,check_sa_writer) 2752 2753 if write: 2754 writers.FortranWriter('nsqso_born.inc').writelines( 2755 """INTEGER NSQSO_BORN 2756 PARAMETER (NSQSO_BORN=%d)"""%replace_dict['nSqAmpSplitOrders']) 2757 2758 replace_dict['jamp_lines'] = '\n'.join(jamp_lines) 2759 2760 matrix_template = self.matrix_template 2761 if self.opt['export_format']=='standalone_msP' : 2762 matrix_template = 'matrix_standalone_msP_v4.inc' 2763 elif self.opt['export_format']=='standalone_msF': 2764 matrix_template = 'matrix_standalone_msF_v4.inc' 2765 elif self.opt['export_format']=='matchbox': 2766 replace_dict["proc_prefix"] = 'MG5_%i_' % matrix_element.get('processes')[0].get('id') 2767 replace_dict["color_information"] = self.get_color_string_lines(matrix_element) 2768 2769 if len(split_orders)>0: 2770 if self.opt['export_format'] in ['standalone_msP', 'standalone_msF']: 2771 logger.debug("Warning: The export format %s is not "+\ 2772 " available for individual ME evaluation of given coupl. orders."+\ 2773 " Only the total ME will be computed.", self.opt['export_format']) 2774 elif self.opt['export_format'] in ['madloop_matchbox']: 2775 replace_dict["color_information"] = self.get_color_string_lines(matrix_element) 2776 matrix_template = "matrix_standalone_matchbox_splitOrders_v4.inc" 2777 else: 2778 matrix_template = "matrix_standalone_splitOrders_v4.inc" 2779 2780 replace_dict['template_file'] = pjoin(_file_path, 'iolibs', 'template_files', matrix_template) 2781 replace_dict['template_file2'] = pjoin(_file_path, \ 2782 'iolibs/template_files/split_orders_helping_functions.inc') 2783 if write and writer: 2784 path = replace_dict['template_file'] 2785 content = open(path).read() 2786 content = content % replace_dict 2787 # Write the file 2788 writer.writelines(content) 2789 # Add the helper functions. 2790 if len(split_orders)>0: 2791 content = '\n' + open(replace_dict['template_file2'])\ 2792 .read()%replace_dict 2793 writer.writelines(content) 2794 return len([call for call in helas_calls if call.find('#') != 0]) 2795 else: 2796 replace_dict['return_value'] = len([call for call in helas_calls if call.find('#') != 0]) 2797 return replace_dict # for subclass update
2798
2799 - def write_check_sa_splitOrders(self,squared_orders, split_orders, nexternal, 2800 nincoming, proc_prefix, writer):
2801 """ Write out a more advanced version of the check_sa drivers that 2802 individually returns the matrix element for each contributing squared 2803 order.""" 2804 2805 check_sa_content = open(pjoin(self.mgme_dir, 'madgraph', 'iolibs', \ 2806 'template_files', 'check_sa_splitOrders.f')).read() 2807 printout_sq_orders=[] 2808 for i, squared_order in enumerate(squared_orders): 2809 sq_orders=[] 2810 for j, sqo in enumerate(squared_order): 2811 sq_orders.append('%s=%d'%(split_orders[j],sqo)) 2812 printout_sq_orders.append(\ 2813 "write(*,*) '%d) Matrix element for (%s) = ',MATELEMS(%d)"\ 2814 %(i+1,' '.join(sq_orders),i+1)) 2815 printout_sq_orders='\n'.join(printout_sq_orders) 2816 replace_dict = {'printout_sqorders':printout_sq_orders, 2817 'nSplitOrders':len(squared_orders), 2818 'nexternal':nexternal, 2819 'nincoming':nincoming, 2820 'proc_prefix':proc_prefix} 2821 2822 if writer: 2823 writer.writelines(check_sa_content % replace_dict) 2824 else: 2825 return replace_dict
2826
2827 -class ProcessExporterFortranMatchBox(ProcessExporterFortranSA):
2828 """class to take care of exporting a set of matrix element for the Matchbox 2829 code in the case of Born only routine""" 2830 2831 default_opt = {'clean': False, 'complex_mass':False, 2832 'export_format':'matchbox', 'mp': False, 2833 'sa_symmetry': True} 2834 2835 #specific template of the born 2836 2837 2838 matrix_template = "matrix_standalone_matchbox.inc" 2839 2840 @staticmethod
2841 - def get_color_string_lines(matrix_element):
2842 """Return the color matrix definition lines for this matrix element. Split 2843 rows in chunks of size n.""" 2844 2845 if not matrix_element.get('color_matrix'): 2846 return "\n".join(["out = 1"]) 2847 2848 #start the real work 2849 color_denominators = matrix_element.get('color_matrix').\ 2850 get_line_denominators() 2851 matrix_strings = [] 2852 my_cs = color.ColorString() 2853 for i_color in range(len(color_denominators)): 2854 # Then write the numerators for the matrix elements 2855 my_cs.from_immutable(sorted(matrix_element.get('color_basis').keys())[i_color]) 2856 t_str=repr(my_cs) 2857 t_match=re.compile(r"(\w+)\(([\s\d+\,]*)\)") 2858 # from '1 T(2,4,1) Tr(4,5,6) Epsilon(5,3,2,1) T(1,2)' returns with findall: 2859 # [('T', '2,4,1'), ('Tr', '4,5,6'), ('Epsilon', '5,3,2,1'), ('T', '1,2')] 2860 all_matches = t_match.findall(t_str) 2861 output = {} 2862 arg=[] 2863 for match in all_matches: 2864 ctype, tmparg = match[0], [m.strip() for m in match[1].split(',')] 2865 if ctype in ['ColorOne' ]: 2866 continue 2867 if ctype not in ['T', 'Tr' ]: 2868 raise MadGraph5Error('Color Structure not handled by Matchbox: %s' % ctype) 2869 tmparg += ['0'] 2870 arg +=tmparg 2871 for j, v in enumerate(arg): 2872 output[(i_color,j)] = v 2873 2874 for key in output: 2875 if matrix_strings == []: 2876 #first entry 2877 matrix_strings.append(""" 2878 if (in1.eq.%s.and.in2.eq.%s)then 2879 out = %s 2880 """ % (key[0], key[1], output[key])) 2881 else: 2882 #not first entry 2883 matrix_strings.append(""" 2884 elseif (in1.eq.%s.and.in2.eq.%s)then 2885 out = %s 2886 """ % (key[0], key[1], output[key])) 2887 if len(matrix_strings): 2888 matrix_strings.append(" else \n out = - 1 \n endif") 2889 else: 2890 return "\n out = - 1 \n " 2891 return "\n".join(matrix_strings)
2892
2893 - def make(self,*args,**opts):
2894 pass
2895
2896 - def get_JAMP_lines(self, col_amps, JAMP_format="JAMP(%s)", AMP_format="AMP(%s)", split=-1, 2897 JAMP_formatLC=None):
2898 2899 """Adding leading color part of the colorflow""" 2900 2901 if not JAMP_formatLC: 2902 JAMP_formatLC= "LN%s" % JAMP_format 2903 2904 error_msg="Malformed '%s' argument passed to the get_JAMP_lines" 2905 if(isinstance(col_amps,helas_objects.HelasMatrixElement)): 2906 col_amps=col_amps.get_color_amplitudes() 2907 elif(isinstance(col_amps,list)): 2908 if(col_amps and isinstance(col_amps[0],list)): 2909 col_amps=col_amps 2910 else: 2911 raise MadGraph5Error(error_msg % 'col_amps') 2912 else: 2913 raise MadGraph5Error(error_msg % 'col_amps') 2914 2915 text, nb = super(ProcessExporterFortranMatchBox, self).get_JAMP_lines(col_amps, 2916 JAMP_format=JAMP_format, 2917 AMP_format=AMP_format, 2918 split=-1) 2919 2920 2921 # Filter the col_ampls to generate only those without any 1/NC terms 2922 2923 LC_col_amps = [] 2924 for coeff_list in col_amps: 2925 to_add = [] 2926 for (coefficient, amp_number) in coeff_list: 2927 if coefficient[3]==0: 2928 to_add.append( (coefficient, amp_number) ) 2929 LC_col_amps.append(to_add) 2930 2931 text2, nb = super(ProcessExporterFortranMatchBox, self).get_JAMP_lines(LC_col_amps, 2932 JAMP_format=JAMP_formatLC, 2933 AMP_format=AMP_format, 2934 split=-1) 2935 text += text2 2936 2937 return text, 0
2938
2939 2940 2941 2942 #=============================================================================== 2943 # ProcessExporterFortranMW 2944 #=============================================================================== 2945 -class ProcessExporterFortranMW(ProcessExporterFortran):
2946 """Class to take care of exporting a set of matrix elements to 2947 MadGraph v4 - MadWeight format.""" 2948 2949 matrix_file="matrix_standalone_v4.inc" 2950 jamp_optim = False 2951
2952 - def copy_template(self, model):
2953 """Additional actions needed for setup of Template 2954 """ 2955 2956 super(ProcessExporterFortranMW, self).copy_template(model) 2957 2958 # Add the MW specific file 2959 shutil.copytree(pjoin(MG5DIR,'Template','MadWeight'), 2960 pjoin(self.dir_path, 'Source','MadWeight'), True) 2961 shutil.copytree(pjoin(MG5DIR,'madgraph','madweight'), 2962 pjoin(self.dir_path, 'bin','internal','madweight'), True) 2963 files.mv(pjoin(self.dir_path, 'Source','MadWeight','src','setrun.f'), 2964 pjoin(self.dir_path, 'Source','setrun.f')) 2965 files.mv(pjoin(self.dir_path, 'Source','MadWeight','src','run.inc'), 2966 pjoin(self.dir_path, 'Source','run.inc')) 2967 # File created from Template (Different in some child class) 2968 filename = os.path.join(self.dir_path,'Source','run_config.inc') 2969 self.write_run_config_file(writers.FortranWriter(filename)) 2970 2971 try: 2972 subprocess.call([os.path.join(self.dir_path, 'Source','MadWeight','bin','internal','pass_to_madweight')], 2973 stdout = os.open(os.devnull, os.O_RDWR), 2974 stderr = os.open(os.devnull, os.O_RDWR), 2975 cwd=self.dir_path) 2976 except OSError: 2977 # Probably madweight already called 2978 pass 2979 2980 # Copy the different python file in the Template 2981 self.copy_python_file() 2982 # create the appropriate cuts.f 2983 self.get_mw_cuts_version() 2984 2985 # add the makefile in Source directory 2986 filename = os.path.join(self.dir_path,'Source','makefile') 2987 self.write_source_makefile(writers.FortranWriter(filename))
2988 2989 2990 2991 2992 #=========================================================================== 2993 # convert_model 2994 #===========================================================================
2995 - def convert_model(self, model, wanted_lorentz = [], 2996 wanted_couplings = []):
2997 2998 super(ProcessExporterFortranMW,self).convert_model(model, 2999 wanted_lorentz, wanted_couplings) 3000 3001 IGNORE_PATTERNS = ('*.pyc','*.dat','*.py~') 3002 try: 3003 shutil.rmtree(pjoin(self.dir_path,'bin','internal','ufomodel')) 3004 except OSError as error: 3005 pass 3006 model_path = model.get('modelpath') 3007 # This is not safe if there is a '##' or '-' in the path. 3008 shutil.copytree(model_path, 3009 pjoin(self.dir_path,'bin','internal','ufomodel'), 3010 ignore=shutil.ignore_patterns(*IGNORE_PATTERNS)) 3011 if hasattr(model, 'restrict_card'): 3012 out_path = pjoin(self.dir_path, 'bin', 'internal','ufomodel', 3013 'restrict_default.dat') 3014 if isinstance(model.restrict_card, check_param_card.ParamCard): 3015 model.restrict_card.write(out_path) 3016 else: 3017 files.cp(model.restrict_card, out_path)
3018 3019 #=========================================================================== 3020 # generate_subprocess_directory 3021 #===========================================================================
3022 - def copy_python_file(self):
3023 """copy the python file require for the Template""" 3024 3025 # madevent interface 3026 cp(_file_path+'/interface/madweight_interface.py', 3027 self.dir_path+'/bin/internal/madweight_interface.py') 3028 cp(_file_path+'/interface/extended_cmd.py', 3029 self.dir_path+'/bin/internal/extended_cmd.py') 3030 cp(_file_path+'/interface/common_run_interface.py', 3031 self.dir_path+'/bin/internal/common_run_interface.py') 3032 cp(_file_path+'/various/misc.py', self.dir_path+'/bin/internal/misc.py') 3033 cp(_file_path+'/iolibs/files.py', self.dir_path+'/bin/internal/files.py') 3034 cp(_file_path+'/iolibs/save_load_object.py', 3035 self.dir_path+'/bin/internal/save_load_object.py') 3036 cp(_file_path+'/madevent/gen_crossxhtml.py', 3037 self.dir_path+'/bin/internal/gen_crossxhtml.py') 3038 cp(_file_path+'/madevent/sum_html.py', 3039 self.dir_path+'/bin/internal/sum_html.py') 3040 cp(_file_path+'/various/FO_analyse_card.py', 3041 self.dir_path+'/bin/internal/FO_analyse_card.py') 3042 cp(_file_path+'/iolibs/file_writers.py', 3043 self.dir_path+'/bin/internal/file_writers.py') 3044 #model file 3045 cp(_file_path+'../models/check_param_card.py', 3046 self.dir_path+'/bin/internal/check_param_card.py') 3047 3048 #madevent file 3049 cp(_file_path+'/__init__.py', self.dir_path+'/bin/internal/__init__.py') 3050 cp(_file_path+'/various/lhe_parser.py', 3051 self.dir_path+'/bin/internal/lhe_parser.py') 3052 3053 cp(_file_path+'/various/banner.py', 3054 self.dir_path+'/bin/internal/banner.py') 3055 cp(_file_path+'/various/shower_card.py', 3056 self.dir_path+'/bin/internal/shower_card.py') 3057 cp(_file_path+'/various/cluster.py', 3058 self.dir_path+'/bin/internal/cluster.py') 3059 3060 # logging configuration 3061 cp(_file_path+'/interface/.mg5_logging.conf', 3062 self.dir_path+'/bin/internal/me5_logging.conf') 3063 cp(_file_path+'/interface/coloring_logging.py', 3064 self.dir_path+'/bin/internal/coloring_logging.py')
3065 3066 3067 #=========================================================================== 3068 # Change the version of cuts.f to the one compatible with MW 3069 #===========================================================================
3070 - def get_mw_cuts_version(self, outpath=None):
3071 """create the appropriate cuts.f 3072 This is based on the one associated to ME output but: 3073 1) No clustering (=> remove initcluster/setclscales) 3074 2) Adding the definition of cut_bw at the file. 3075 """ 3076 3077 template = open(pjoin(MG5DIR,'Template','LO','SubProcesses','cuts.f')) 3078 3079 text = StringIO() 3080 #1) remove all dependencies in ickkw >1: 3081 nb_if = 0 3082 for line in template: 3083 if 'if(xqcut.gt.0d0' in line: 3084 nb_if = 1 3085 if nb_if == 0: 3086 text.write(line) 3087 continue 3088 if re.search(r'if\(.*\)\s*then', line): 3089 nb_if += 1 3090 elif 'endif' in line: 3091 nb_if -= 1 3092 3093 #2) add fake cut_bw (have to put the true one later) 3094 text.write(""" 3095 logical function cut_bw(p) 3096 include 'madweight_param.inc' 3097 double precision p(*) 3098 if (bw_cut) then 3099 cut_bw = .true. 3100 else 3101 stop 1 3102 endif 3103 return 3104 end 3105 """) 3106 3107 final = text.getvalue() 3108 #3) remove the call to initcluster: 3109 template = final.replace('call initcluster', '! Remove for MW!call initcluster') 3110 template = template.replace('genps.inc', 'maxparticles.inc') 3111 #Now we can write it 3112 if not outpath: 3113 fsock = open(pjoin(self.dir_path, 'SubProcesses', 'cuts.f'), 'w') 3114 elif isinstance(outpath, str): 3115 fsock = open(outpath, 'w') 3116 else: 3117 fsock = outpath 3118 fsock.write(template)
3119 3120 3121 3122 #=========================================================================== 3123 # Make the Helas and Model directories for Standalone directory 3124 #===========================================================================
3125 - def make(self):
3126 """Run make in the DHELAS, MODEL, PDF and CERNLIB directories, to set up 3127 everything for running madweight 3128 """ 3129 3130 source_dir = os.path.join(self.dir_path, "Source") 3131 logger.info("Running make for Helas") 3132 misc.compile(arg=['../lib/libdhelas.a'], cwd=source_dir, mode='fortran') 3133 logger.info("Running make for Model") 3134 misc.compile(arg=['../lib/libmodel.a'], cwd=source_dir, mode='fortran') 3135 logger.info("Running make for PDF") 3136 misc.compile(arg=['../lib/libpdf.a'], cwd=source_dir, mode='fortran') 3137 logger.info("Running make for CERNLIB") 3138 misc.compile(arg=['../lib/libcernlib.a'], cwd=source_dir, mode='fortran') 3139 logger.info("Running make for GENERIC") 3140 misc.compile(arg=['../lib/libgeneric.a'], cwd=source_dir, mode='fortran') 3141 logger.info("Running make for blocks") 3142 misc.compile(arg=['../lib/libblocks.a'], cwd=source_dir, mode='fortran') 3143 logger.info("Running make for tools") 3144 misc.compile(arg=['../lib/libtools.a'], cwd=source_dir, mode='fortran')
3145 3146 #=========================================================================== 3147 # Create proc_card_mg5.dat for MadWeight directory 3148 #===========================================================================
3149 - def finalize(self, matrix_elements, history, mg5options, flaglist):
3150 """Finalize Standalone MG4 directory by generation proc_card_mg5.dat""" 3151 3152 compiler = {'fortran': mg5options['fortran_compiler'], 3153 'cpp': mg5options['cpp_compiler'], 3154 'f2py': mg5options['f2py_compiler']} 3155 3156 3157 3158 #proc_charac 3159 self.proc_characteristics['nlo_mixed_expansion'] = mg5options['nlo_mixed_expansion'] 3160 self.create_proc_charac() 3161 3162 # Write maxparticles.inc based on max of ME's/subprocess groups 3163 filename = pjoin(self.dir_path,'Source','maxparticles.inc') 3164 self.write_maxparticles_file(writers.FortranWriter(filename), 3165 matrix_elements) 3166 ln(pjoin(self.dir_path, 'Source', 'maxparticles.inc'), 3167 pjoin(self.dir_path, 'Source','MadWeight','blocks')) 3168 ln(pjoin(self.dir_path, 'Source', 'maxparticles.inc'), 3169 pjoin(self.dir_path, 'Source','MadWeight','tools')) 3170 3171 self.set_compiler(compiler) 3172 self.make() 3173 3174 # Write command history as proc_card_mg5 3175 if os.path.isdir(os.path.join(self.dir_path, 'Cards')): 3176 output_file = os.path.join(self.dir_path, 'Cards', 'proc_card_mg5.dat') 3177 history.write(output_file) 3178 3179 ProcessExporterFortran.finalize(self, matrix_elements, 3180 history, mg5options, flaglist)
3181 3182 3183 3184 #=========================================================================== 3185 # create the run_card for MW 3186 #===========================================================================
3187 - def create_run_card(self, matrix_elements, history):
3188 """ """ 3189 3190 run_card = banner_mod.RunCard() 3191 3192 # pass to default for MW 3193 run_card["run_tag"] = "\'not_use\'" 3194 run_card["fixed_ren_scale"] = "T" 3195 run_card["fixed_fac_scale"] = "T" 3196 run_card.remove_all_cut() 3197 3198 run_card.write(pjoin(self.dir_path, 'Cards', 'run_card_default.dat'), 3199 template=pjoin(MG5DIR, 'Template', 'MadWeight', 'Cards', 'run_card.dat'), 3200 python_template=True) 3201 run_card.write(pjoin(self.dir_path, 'Cards', 'run_card.dat'), 3202 template=pjoin(MG5DIR, 'Template', 'MadWeight', 'Cards', 'run_card.dat'), 3203 python_template=True)
3204 3205 #=========================================================================== 3206 # export model files 3207 #===========================================================================
3208 - def export_model_files(self, model_path):
3209 """export the model dependent files for V4 model""" 3210 3211 super(ProcessExporterFortranMW,self).export_model_files(model_path) 3212 # Add the routine update_as_param in v4 model 3213 # This is a function created in the UFO 3214 text=""" 3215 subroutine update_as_param() 3216 call setpara('param_card.dat',.false.) 3217 return 3218 end 3219 """ 3220 ff = open(os.path.join(self.dir_path, 'Source', 'MODEL', 'couplings.f'),'a') 3221 ff.write(text) 3222 ff.close() 3223 3224 # Modify setrun.f 3225 text = open(os.path.join(self.dir_path,'Source','setrun.f')).read() 3226 text = text.replace('call setpara(param_card_name)', 'call setpara(param_card_name, .true.)') 3227 fsock = open(os.path.join(self.dir_path,'Source','setrun.f'), 'w') 3228 fsock.write(text) 3229 fsock.close() 3230 3231 # Modify initialization.f 3232 text = open(os.path.join(self.dir_path,'SubProcesses','initialization.f')).read() 3233 text = text.replace('call setpara(param_name)', 'call setpara(param_name, .true.)') 3234 fsock = open(os.path.join(self.dir_path,'SubProcesses','initialization.f'), 'w') 3235 fsock.write(text) 3236 fsock.close() 3237 3238 3239 self.make_model_symbolic_link()
3240 3241 #=========================================================================== 3242 # generate_subprocess_directory 3243 #===========================================================================
3244 - def generate_subprocess_directory(self, matrix_element, 3245 fortran_model,number):
3246 """Generate the Pxxxxx directory for a subprocess in MG4 MadWeight format, 3247 including the necessary matrix.f and nexternal.inc files""" 3248 3249 cwd = os.getcwd() 3250 # Create the directory PN_xx_xxxxx in the specified path 3251 dirpath = os.path.join(self.dir_path, 'SubProcesses', \ 3252 "P%s" % matrix_element.get('processes')[0].shell_string()) 3253 3254 try: 3255 os.mkdir(dirpath) 3256 except os.error as error: 3257 logger.warning(error.strerror + " " + dirpath) 3258 3259 #try: 3260 # os.chdir(dirpath) 3261 #except os.error: 3262 # logger.error('Could not cd to directory %s' % dirpath) 3263 # return 0 3264 3265 logger.info('Creating files in directory %s' % dirpath) 3266 3267 # Extract number of external particles 3268 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 3269 3270 # Create the matrix.f file and the nexternal.inc file 3271 filename = pjoin(dirpath,'matrix.f') 3272 calls,ncolor = self.write_matrix_element_v4( 3273 writers.FortranWriter(filename), 3274 matrix_element, 3275 fortran_model) 3276 3277 filename = pjoin(dirpath, 'auto_dsig.f') 3278 self.write_auto_dsig_file(writers.FortranWriter(filename), 3279 matrix_element) 3280 3281 filename = pjoin(dirpath, 'configs.inc') 3282 mapconfigs, s_and_t_channels = self.write_configs_file(\ 3283 writers.FortranWriter(filename), 3284 matrix_element) 3285 3286 filename = pjoin(dirpath, 'nexternal.inc') 3287 self.write_nexternal_file(writers.FortranWriter(filename), 3288 nexternal, ninitial) 3289 3290 filename = pjoin(dirpath, 'leshouche.inc') 3291 self.write_leshouche_file(writers.FortranWriter(filename), 3292 matrix_element) 3293 3294 filename = pjoin(dirpath, 'props.inc') 3295 self.write_props_file(writers.FortranWriter(filename), 3296 matrix_element, 3297 s_and_t_channels) 3298 3299 filename = pjoin(dirpath, 'pmass.inc') 3300 self.write_pmass_file(writers.FortranWriter(filename), 3301 matrix_element) 3302 3303 filename = pjoin(dirpath, 'ngraphs.inc') 3304 self.write_ngraphs_file(writers.FortranWriter(filename), 3305 len(matrix_element.get_all_amplitudes())) 3306 3307 filename = pjoin(dirpath, 'maxamps.inc') 3308 self.write_maxamps_file(writers.FortranWriter(filename), 3309 len(matrix_element.get('diagrams')), 3310 ncolor, 3311 len(matrix_element.get('processes')), 3312 1) 3313 3314 filename = pjoin(dirpath, 'phasespace.inc') 3315 self.write_phasespace_file(writers.FortranWriter(filename), 3316 len(matrix_element.get('diagrams')), 3317 ) 3318 3319 # Generate diagrams 3320 if not 'noeps' in self.opt['output_options'] or self.opt['output_options']['noeps'] != 'True': 3321 filename = pjoin(dirpath, "matrix.ps") 3322 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\ 3323 get('diagrams'), 3324 filename, 3325 model=matrix_element.get('processes')[0].\ 3326 get('model'), 3327 amplitude='') 3328 logger.info("Generating Feynman diagrams for " + \ 3329 matrix_element.get('processes')[0].nice_string()) 3330 plot.draw() 3331 3332 #import genps.inc and maxconfigs.inc into Subprocesses 3333 ln(self.dir_path + '/Source/genps.inc', self.dir_path + '/SubProcesses', log=False) 3334 #ln(self.dir_path + '/Source/maxconfigs.inc', self.dir_path + '/SubProcesses', log=False) 3335 3336 linkfiles = ['driver.f', 'cuts.f', 'initialization.f','gen_ps.f', 'makefile', 'coupl.inc','madweight_param.inc', 'run.inc', 'setscales.f', 'genps.inc'] 3337 3338 for file in linkfiles: 3339 ln('../%s' % file, starting_dir=cwd) 3340 3341 ln('nexternal.inc', '../../Source', log=False, cwd=dirpath) 3342 ln('leshouche.inc', '../../Source', log=False, cwd=dirpath) 3343 ln('maxamps.inc', '../../Source', log=False, cwd=dirpath) 3344 ln('phasespace.inc', '../', log=True, cwd=dirpath) 3345 # Return to original PWD 3346 #os.chdir(cwd) 3347 3348 if not calls: 3349 calls = 0 3350 return calls
3351 3352 #=========================================================================== 3353 # write_matrix_element_v4 3354 #===========================================================================
3355 - def write_matrix_element_v4(self, writer, matrix_element, fortran_model,proc_id = "", config_map = []):
3356 """Export a matrix element to a matrix.f file in MG4 MadWeight format""" 3357 3358 if not matrix_element.get('processes') or \ 3359 not matrix_element.get('diagrams'): 3360 return 0 3361 3362 if writer: 3363 if not isinstance(writer, writers.FortranWriter): 3364 raise writers.FortranWriter.FortranWriterError(\ 3365 "writer not FortranWriter") 3366 3367 # Set lowercase/uppercase Fortran code 3368 writers.FortranWriter.downcase = False 3369 3370 replace_dict = {} 3371 3372 # Extract version number and date from VERSION file 3373 info_lines = self.get_mg5_info_lines() 3374 replace_dict['info_lines'] = info_lines 3375 3376 # Extract process info lines 3377 process_lines = self.get_process_info_lines(matrix_element) 3378 replace_dict['process_lines'] = process_lines 3379 3380 # Set proc_id 3381 replace_dict['proc_id'] = proc_id 3382 3383 # Extract number of external particles 3384 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 3385 replace_dict['nexternal'] = nexternal 3386 3387 # Extract ncomb 3388 ncomb = matrix_element.get_helicity_combinations() 3389 replace_dict['ncomb'] = ncomb 3390 3391 # Extract helicity lines 3392 helicity_lines = self.get_helicity_lines(matrix_element) 3393 replace_dict['helicity_lines'] = helicity_lines 3394 3395 # Extract overall denominator 3396 # Averaging initial state color, spin, and identical FS particles 3397 den_factor_line = self.get_den_factor_line(matrix_element) 3398 replace_dict['den_factor_line'] = den_factor_line 3399 3400 # Extract ngraphs 3401 ngraphs = matrix_element.get_number_of_amplitudes() 3402 replace_dict['ngraphs'] = ngraphs 3403 3404 # Extract nwavefuncs 3405 nwavefuncs = matrix_element.get_number_of_wavefunctions() 3406 replace_dict['nwavefuncs'] = nwavefuncs 3407 3408 # Extract ncolor 3409 ncolor = max(1, len(matrix_element.get('color_basis'))) 3410 replace_dict['ncolor'] = ncolor 3411 3412 # Extract color data lines 3413 color_data_lines = self.get_color_data_lines(matrix_element) 3414 replace_dict['color_data_lines'] = "\n".join(color_data_lines) 3415 3416 # Extract helas calls 3417 helas_calls = fortran_model.get_matrix_element_calls(\ 3418 matrix_element) 3419 3420 replace_dict['helas_calls'] = "\n".join(helas_calls) 3421 3422 # Extract JAMP lines 3423 jamp_lines, nb = self.get_JAMP_lines(matrix_element) 3424 replace_dict['jamp_lines'] = '\n'.join(jamp_lines) 3425 3426 replace_dict['template_file'] = os.path.join(_file_path, \ 3427 'iolibs/template_files/%s' % self.matrix_file) 3428 replace_dict['template_file2'] = '' 3429 3430 if writer: 3431 file = open(replace_dict['template_file']).read() 3432 file = file % replace_dict 3433 # Write the file 3434 writer.writelines(file) 3435 return len([call for call in helas_calls if call.find('#') != 0]),ncolor 3436 else: 3437 replace_dict['return_value'] = (len([call for call in helas_calls if call.find('#') != 0]),ncolor)
3438 3439 #=========================================================================== 3440 # write_source_makefile 3441 #===========================================================================
3442 - def write_source_makefile(self, writer):
3443 """Write the nexternal.inc file for madweight""" 3444 3445 3446 path = os.path.join(_file_path,'iolibs','template_files','madweight_makefile_source') 3447 set_of_lib = '$(LIBRARIES) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(LIBDIR)libtf.$(libext)' 3448 text = open(path).read() % {'libraries': set_of_lib} 3449 writer.write(text) 3450 3451 return True
3452
3453 - def write_phasespace_file(self, writer, nb_diag):
3454 """ """ 3455 3456 template = """ include 'maxparticles.inc' 3457 integer max_branches 3458 parameter (max_branches=max_particles-1) 3459 integer max_configs 3460 parameter (max_configs=%(nb_diag)s) 3461 3462 c channel position 3463 integer config_pos,perm_pos 3464 common /to_config/config_pos,perm_pos 3465 3466 """ 3467 3468 writer.write(template % {'nb_diag': nb_diag})
3469 3470 3471 #=========================================================================== 3472 # write_auto_dsig_file 3473 #===========================================================================
3474 - def write_auto_dsig_file(self, writer, matrix_element, proc_id = ""):
3475 """Write the auto_dsig.f file for the differential cross section 3476 calculation, includes pdf call information (MadWeight format)""" 3477 3478 if not matrix_element.get('processes') or \ 3479 not matrix_element.get('diagrams'): 3480 return 0 3481 3482 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 3483 3484 if ninitial < 1 or ninitial > 2: 3485 raise writers.FortranWriter.FortranWriterError("""Need ninitial = 1 or 2 to write auto_dsig file""") 3486 3487 replace_dict = {} 3488 3489 # Extract version number and date from VERSION file 3490 info_lines = self.get_mg5_info_lines() 3491 replace_dict['info_lines'] = info_lines 3492 3493 # Extract process info lines 3494 process_lines = self.get_process_info_lines(matrix_element) 3495 replace_dict['process_lines'] = process_lines 3496 3497 # Set proc_id 3498 replace_dict['proc_id'] = proc_id 3499 replace_dict['numproc'] = 1 3500 3501 # Set dsig_line 3502 if ninitial == 1: 3503 # No conversion, since result of decay should be given in GeV 3504 dsig_line = "pd(0)*dsiguu" 3505 else: 3506 # Convert result (in GeV) to pb 3507 dsig_line = "pd(0)*conv*dsiguu" 3508 3509 replace_dict['dsig_line'] = dsig_line 3510 3511 # Extract pdf lines 3512 pdf_vars, pdf_data, pdf_lines = \ 3513 self.get_pdf_lines(matrix_element, ninitial, proc_id != "") 3514 replace_dict['pdf_vars'] = pdf_vars 3515 replace_dict['pdf_data'] = pdf_data 3516 replace_dict['pdf_lines'] = pdf_lines 3517 3518 # Lines that differ between subprocess group and regular 3519 if proc_id: 3520 replace_dict['numproc'] = int(proc_id) 3521 replace_dict['passcuts_begin'] = "" 3522 replace_dict['passcuts_end'] = "" 3523 # Set lines for subprocess group version 3524 # Set define_iconfigs_lines 3525 replace_dict['define_subdiag_lines'] = \ 3526 """\nINTEGER SUBDIAG(MAXSPROC),IB(2) 3527 COMMON/TO_SUB_DIAG/SUBDIAG,IB""" 3528 else: 3529 replace_dict['passcuts_begin'] = "IF (PASSCUTS(PP)) THEN" 3530 replace_dict['passcuts_end'] = "ENDIF" 3531 replace_dict['define_subdiag_lines'] = "" 3532 3533 if writer: 3534 file = open(os.path.join(_file_path, \ 3535 'iolibs/template_files/auto_dsig_mw.inc')).read() 3536 3537 file = file % replace_dict 3538 # Write the file 3539 writer.writelines(file) 3540 else: 3541 return replace_dict
3542 #=========================================================================== 3543 # write_configs_file 3544 #===========================================================================
3545 - def write_configs_file(self, writer, matrix_element):
3546 """Write the configs.inc file for MadEvent""" 3547 3548 # Extract number of external particles 3549 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 3550 3551 configs = [(i+1, d) for i,d in enumerate(matrix_element.get('diagrams'))] 3552 mapconfigs = [c[0] for c in configs] 3553 model = matrix_element.get('processes')[0].get('model') 3554 return mapconfigs, self.write_configs_file_from_diagrams(writer, 3555 [[c[1]] for c in configs], 3556 mapconfigs, 3557 nexternal, ninitial,matrix_element, model)
3558 3559 #=========================================================================== 3560 # write_run_configs_file 3561 #===========================================================================
3562 - def write_run_config_file(self, writer):
3563 """Write the run_configs.inc file for MadWeight""" 3564 3565 path = os.path.join(_file_path,'iolibs','template_files','madweight_run_config.inc') 3566 text = open(path).read() % {'chanperjob':'5'} 3567 writer.write(text) 3568 return True
3569 3570 #=========================================================================== 3571 # write_configs_file_from_diagrams 3572 #===========================================================================
3573 - def write_configs_file_from_diagrams(self, writer, configs, mapconfigs, 3574 nexternal, ninitial, matrix_element, model):
3575 """Write the actual configs.inc file. 3576 3577 configs is the diagrams corresponding to configs (each 3578 diagrams is a list of corresponding diagrams for all 3579 subprocesses, with None if there is no corresponding diagrams 3580 for a given process). 3581 mapconfigs gives the diagram number for each config. 3582 3583 For s-channels, we need to output one PDG for each subprocess in 3584 the subprocess group, in order to be able to pick the right 3585 one for multiprocesses.""" 3586 3587 lines = [] 3588 3589 particle_dict = matrix_element.get('processes')[0].get('model').\ 3590 get('particle_dict') 3591 3592 s_and_t_channels = [] 3593 3594 vert_list = [max([d for d in config if d][0].get_vertex_leg_numbers()) \ 3595 for config in configs if [d for d in config if d][0].\ 3596 get_vertex_leg_numbers()!=[]] 3597 3598 minvert = min(vert_list) if vert_list!=[] else 0 3599 # Number of subprocesses 3600 nsubprocs = len(configs[0]) 3601 3602 nconfigs = 0 3603 3604 new_pdg = model.get_first_non_pdg() 3605 3606 for iconfig, helas_diags in enumerate(configs): 3607 if any([vert > minvert for vert in 3608 [d for d in helas_diags if d][0].get_vertex_leg_numbers()]): 3609 # Only 3-vertices allowed in configs.inc 3610 continue 3611 nconfigs += 1 3612 3613 # Need s- and t-channels for all subprocesses, including 3614 # those that don't contribute to this config 3615 empty_verts = [] 3616 stchannels = [] 3617 for h in helas_diags: 3618 if h: 3619 # get_s_and_t_channels gives vertices starting from 3620 # final state external particles and working inwards 3621 stchannels.append(h.get('amplitudes')[0].\ 3622 get_s_and_t_channels(ninitial,model,new_pdg)) 3623 else: 3624 stchannels.append((empty_verts, None)) 3625 3626 # For t-channels, just need the first non-empty one 3627 tchannels = [t for s,t in stchannels if t != None][0] 3628 3629 # For s_and_t_channels (to be used later) use only first config 3630 s_and_t_channels.append([[s for s,t in stchannels if t != None][0], 3631 tchannels]) 3632 3633 # Make sure empty_verts is same length as real vertices 3634 if any([s for s,t in stchannels]): 3635 empty_verts[:] = [None]*max([len(s) for s,t in stchannels]) 3636 3637 # Reorganize s-channel vertices to get a list of all 3638 # subprocesses for each vertex 3639 schannels = list(zip(*[s for s,t in stchannels])) 3640 else: 3641 schannels = [] 3642 3643 allchannels = schannels 3644 if len(tchannels) > 1: 3645 # Write out tchannels only if there are any non-trivial ones 3646 allchannels = schannels + tchannels 3647 3648 # Write out propagators for s-channel and t-channel vertices 3649 3650 #lines.append("# Diagram %d" % (mapconfigs[iconfig])) 3651 # Correspondance between the config and the diagram = amp2 3652 lines.append("* %d %d " % (nconfigs, 3653 mapconfigs[iconfig])) 3654 3655 for verts in allchannels: 3656 if verts in schannels: 3657 vert = [v for v in verts if v][0] 3658 else: 3659 vert = verts 3660 daughters = [leg.get('number') for leg in vert.get('legs')[:-1]] 3661 last_leg = vert.get('legs')[-1] 3662 line=str(last_leg.get('number'))+" "+str(daughters[0])+" "+str(daughters[1]) 3663 # lines.append("data (iforest(i,%d,%d),i=1,%d)/%s/" % \ 3664 # (last_leg.get('number'), nconfigs, len(daughters), 3665 # ",".join([str(d) for d in daughters]))) 3666 3667 if last_leg.get('id') == 21 and 21 not in particle_dict: 3668 # Fake propagator used in multiparticle vertices 3669 mass = 'zero' 3670 width = 'zero' 3671 pow_part = 0 3672 else: 3673 if (last_leg.get('id')!=7): 3674 particle = particle_dict[last_leg.get('id')] 3675 # Get mass 3676 mass = particle.get('mass') 3677 # Get width 3678 width = particle.get('width') 3679 else : # fake propagator used in multiparticle vertices 3680 mass= 'zero' 3681 width= 'zero' 3682 3683 line=line+" "+mass+" "+width+" " 3684 3685 if verts in schannels: 3686 pdgs = [] 3687 for v in verts: 3688 if v: 3689 pdgs.append(v.get('legs')[-1].get('id')) 3690 else: 3691 pdgs.append(0) 3692 lines.append(line+" S "+str(last_leg.get('id'))) 3693 # lines.append("data (sprop(i,%d,%d),i=1,%d)/%s/" % \ 3694 # (last_leg.get('number'), nconfigs, nsubprocs, 3695 # ",".join([str(d) for d in pdgs]))) 3696 # lines.append("data tprid(%d,%d)/0/" % \ 3697 # (last_leg.get('number'), nconfigs)) 3698 elif verts in tchannels[:-1]: 3699 lines.append(line+" T "+str(last_leg.get('id'))) 3700 # lines.append("data tprid(%d,%d)/%d/" % \ 3701 # (last_leg.get('number'), nconfigs, 3702 # abs(last_leg.get('id')))) 3703 # lines.append("data (sprop(i,%d,%d),i=1,%d)/%s/" % \ 3704 # (last_leg.get('number'), nconfigs, nsubprocs, 3705 # ",".join(['0'] * nsubprocs))) 3706 3707 # Write out number of configs 3708 # lines.append("# Number of configs") 3709 # lines.append("data mapconfig(0)/%d/" % nconfigs) 3710 lines.append(" * ") # a line with just a star indicates this is the end of file 3711 # Write the file 3712 writer.writelines(lines) 3713 3714 return s_and_t_channels
3715
3716 3717 3718 #=============================================================================== 3719 # ProcessExporterFortranME 3720 #=============================================================================== 3721 -class ProcessExporterFortranME(ProcessExporterFortran):
3722 """Class to take care of exporting a set of matrix elements to 3723 MadEvent format.""" 3724 3725 matrix_file = "matrix_madevent_v4.inc" 3726 done_warning_tchannel = False 3727 3728 default_opt = {'clean': False, 'complex_mass':False, 3729 'export_format':'madevent', 'mp': False, 3730 'v5_model': True, 3731 'output_options':{}, 3732 'hel_recycling': False 3733 } 3734 jamp_optim = True 3735
3736 - def __init__(self, dir_path = "", opt=None):
3737 3738 super(ProcessExporterFortranME, self).__init__(dir_path, opt) 3739 3740 # check and format the hel_recycling options as it should if provided 3741 if opt and isinstance(opt['output_options'], dict) and \ 3742 'hel_recycling' in opt['output_options']: 3743 self.opt['hel_recycling'] = banner_mod.ConfigFile.format_variable( 3744 opt['output_options']['hel_recycling'], bool, 'hel_recycling') 3745 3746 if opt and isinstance(opt['output_options'], dict) and \ 3747 't_strategy' in opt['output_options']: 3748 self.opt['t_strategy'] = banner_mod.ConfigFile.format_variable( 3749 opt['output_options']['t_strategy'], int, 't_strategy')
3750 3751 # helper function for customise helas writter 3752 @staticmethod
3753 - def custom_helas_call(call, arg):
3754 if arg['mass'] == '%(M)s,%(W)s,': 3755 arg['mass'] = '%(M)s, fk_%(W)s,' 3756 elif '%(W)s' in arg['mass']: 3757 raise Exception 3758 return call, arg
3759
3760 - def copy_template(self, model):
3761 """Additional actions needed for setup of Template 3762 """ 3763 3764 super(ProcessExporterFortranME, self).copy_template(model) 3765 3766 # File created from Template (Different in some child class) 3767 filename = pjoin(self.dir_path,'Source','run_config.inc') 3768 self.write_run_config_file(writers.FortranWriter(filename)) 3769 3770 # The next file are model dependant (due to SLAH convention) 3771 self.model_name = model.get('name') 3772 # Add the symmetry.f 3773 filename = pjoin(self.dir_path,'SubProcesses','symmetry.f') 3774 self.write_symmetry(writers.FortranWriter(filename)) 3775 # 3776 filename = pjoin(self.dir_path,'SubProcesses','addmothers.f') 3777 self.write_addmothers(writers.FortranWriter(filename)) 3778 # Copy the different python file in the Template 3779 self.copy_python_file()
3780 3781 3782 3783 3784 3785 3786 #=========================================================================== 3787 # generate_subprocess_directory 3788 #===========================================================================
3789 - def copy_python_file(self):
3790 """copy the python file require for the Template""" 3791 3792 # madevent interface 3793 cp(_file_path+'/interface/madevent_interface.py', 3794 self.dir_path+'/bin/internal/madevent_interface.py') 3795 cp(_file_path+'/interface/extended_cmd.py', 3796 self.dir_path+'/bin/internal/extended_cmd.py') 3797 cp(_file_path+'/interface/common_run_interface.py', 3798 self.dir_path+'/bin/internal/common_run_interface.py') 3799 cp(_file_path+'/various/misc.py', self.dir_path+'/bin/internal/misc.py') 3800 cp(_file_path+'/iolibs/files.py', self.dir_path+'/bin/internal/files.py') 3801 cp(_file_path+'/iolibs/save_load_object.py', 3802 self.dir_path+'/bin/internal/save_load_object.py') 3803 cp(_file_path+'/iolibs/file_writers.py', 3804 self.dir_path+'/bin/internal/file_writers.py') 3805 #model file 3806 cp(_file_path+'../models/check_param_card.py', 3807 self.dir_path+'/bin/internal/check_param_card.py') 3808 3809 #copy all the file present in madevent directory 3810 for name in os.listdir(pjoin(_file_path, 'madevent')): 3811 if name not in ['__init__.py'] and name.endswith('.py'): 3812 cp(_file_path+'/madevent/'+name, self.dir_path+'/bin/internal/') 3813 3814 #madevent file 3815 cp(_file_path+'/__init__.py', self.dir_path+'/bin/internal/__init__.py') 3816 cp(_file_path+'/various/lhe_parser.py', 3817 self.dir_path+'/bin/internal/lhe_parser.py') 3818 cp(_file_path+'/various/banner.py', 3819 self.dir_path+'/bin/internal/banner.py') 3820 cp(_file_path+'/various/histograms.py', 3821 self.dir_path+'/bin/internal/histograms.py') 3822 cp(_file_path+'/various/plot_djrs.py', 3823 self.dir_path+'/bin/internal/plot_djrs.py') 3824 cp(_file_path+'/various/systematics.py', self.dir_path+'/bin/internal/systematics.py') 3825 3826 cp(_file_path+'/various/cluster.py', 3827 self.dir_path+'/bin/internal/cluster.py') 3828 cp(_file_path+'/madevent/combine_runs.py', 3829 self.dir_path+'/bin/internal/combine_runs.py') 3830 # logging configuration 3831 cp(_file_path+'/interface/.mg5_logging.conf', 3832 self.dir_path+'/bin/internal/me5_logging.conf') 3833 cp(_file_path+'/interface/coloring_logging.py', 3834 self.dir_path+'/bin/internal/coloring_logging.py') 3835 # shower card and FO_analyse_card. 3836 # Although not needed, it is imported by banner.py 3837 cp(_file_path+'/various/shower_card.py', 3838 self.dir_path+'/bin/internal/shower_card.py') 3839 cp(_file_path+'/various/FO_analyse_card.py', 3840 self.dir_path+'/bin/internal/FO_analyse_card.py')
3841 3842
3843 - def convert_model(self, model, wanted_lorentz = [], 3844 wanted_couplings = []):
3845 3846 super(ProcessExporterFortranME,self).convert_model(model, 3847 wanted_lorentz, wanted_couplings) 3848 3849 IGNORE_PATTERNS = ('*.pyc','*.dat','*.py~') 3850 try: 3851 shutil.rmtree(pjoin(self.dir_path,'bin','internal','ufomodel')) 3852 except OSError as error: 3853 pass 3854 model_path = model.get('modelpath') 3855 # This is not safe if there is a '##' or '-' in the path. 3856 shutil.copytree(model_path, 3857 pjoin(self.dir_path,'bin','internal','ufomodel'), 3858 ignore=shutil.ignore_patterns(*IGNORE_PATTERNS)) 3859 if hasattr(model, 'restrict_card'): 3860 out_path = pjoin(self.dir_path, 'bin', 'internal','ufomodel', 3861 'restrict_default.dat') 3862 if isinstance(model.restrict_card, check_param_card.ParamCard): 3863 model.restrict_card.write(out_path) 3864 else: 3865 files.cp(model.restrict_card, out_path)
3866 3867 #=========================================================================== 3868 # export model files 3869 #===========================================================================
3870 - def export_model_files(self, model_path):
3871 """export the model dependent files""" 3872 3873 super(ProcessExporterFortranME,self).export_model_files(model_path) 3874 3875 # Add the routine update_as_param in v4 model 3876 # This is a function created in the UFO 3877 text=""" 3878 subroutine update_as_param() 3879 call setpara('param_card.dat',.false.) 3880 return 3881 end 3882 """ 3883 ff = open(pjoin(self.dir_path, 'Source', 'MODEL', 'couplings.f'),'a') 3884 ff.write(text) 3885 ff.close() 3886 3887 # Add the symmetry.f 3888 filename = pjoin(self.dir_path,'SubProcesses','symmetry.f') 3889 self.write_symmetry(writers.FortranWriter(filename), v5=False) 3890 3891 # Modify setrun.f 3892 text = open(pjoin(self.dir_path,'Source','setrun.f')).read() 3893 text = text.replace('call setpara(param_card_name)', 'call setpara(param_card_name, .true.)') 3894 fsock = open(pjoin(self.dir_path,'Source','setrun.f'), 'w') 3895 fsock.write(text) 3896 fsock.close() 3897 3898 self.make_model_symbolic_link()
3899 3900 #=========================================================================== 3901 # generate_subprocess_directory 3902 #===========================================================================
3903 - def generate_subprocess_directory(self, matrix_element, 3904 fortran_model, 3905 me_number):
3906 """Generate the Pxxxxx directory for a subprocess in MG4 madevent, 3907 including the necessary matrix.f and various helper files""" 3908 3909 cwd = os.getcwd() 3910 path = pjoin(self.dir_path, 'SubProcesses') 3911 3912 3913 if not self.model: 3914 self.model = matrix_element.get('processes')[0].get('model') 3915 3916 #os.chdir(path) 3917 # Create the directory PN_xx_xxxxx in the specified path 3918 subprocdir = "P%s" % matrix_element.get('processes')[0].shell_string() 3919 try: 3920 os.mkdir(pjoin(path,subprocdir)) 3921 except os.error as error: 3922 logger.warning(error.strerror + " " + subprocdir) 3923 3924 #try: 3925 # os.chdir(subprocdir) 3926 #except os.error: 3927 # logger.error('Could not cd to directory %s' % subprocdir) 3928 # return 0 3929 3930 logger.info('Creating files in directory %s' % subprocdir) 3931 Ppath = pjoin(path, subprocdir) 3932 3933 # Extract number of external particles 3934 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 3935 3936 # Add the driver.f 3937 ncomb = matrix_element.get_helicity_combinations() 3938 filename = pjoin(Ppath,'driver.f') 3939 self.write_driver(writers.FortranWriter(filename),ncomb,n_grouped_proc=1, 3940 v5=self.opt['v5_model']) 3941 3942 3943 # Create the matrix.f file, auto_dsig.f file and all inc files 3944 if self.opt['hel_recycling']: 3945 filename = pjoin(Ppath, 'matrix_orig.f') 3946 else: 3947 filename = pjoin(Ppath, 'matrix.f') 3948 calls, ncolor = \ 3949 self.write_matrix_element_v4(writers.FortranWriter(filename), 3950 matrix_element, fortran_model, subproc_number = me_number) 3951 3952 filename = pjoin(Ppath, 'auto_dsig.f') 3953 self.write_auto_dsig_file(writers.FortranWriter(filename), 3954 matrix_element) 3955 3956 filename = pjoin(Ppath, 'configs.inc') 3957 mapconfigs, (s_and_t_channels, nqcd_list) = self.write_configs_file(\ 3958 writers.FortranWriter(filename), 3959 matrix_element) 3960 3961 filename = pjoin(Ppath, 'config_nqcd.inc') 3962 self.write_config_nqcd_file(writers.FortranWriter(filename), 3963 nqcd_list) 3964 3965 filename = pjoin(Ppath, 'config_subproc_map.inc') 3966 self.write_config_subproc_map_file(writers.FortranWriter(filename), 3967 s_and_t_channels) 3968 3969 filename = pjoin(Ppath, 'coloramps.inc') 3970 self.write_coloramps_file(writers.FortranWriter(filename), 3971 mapconfigs, 3972 matrix_element) 3973 3974 filename = pjoin(Ppath, 'get_color.f') 3975 self.write_colors_file(writers.FortranWriter(filename), 3976 matrix_element) 3977 3978 filename = pjoin(Ppath, 'decayBW.inc') 3979 self.write_decayBW_file(writers.FortranWriter(filename), 3980 s_and_t_channels) 3981 3982 filename = pjoin(Ppath, 'dname.mg') 3983 self.write_dname_file(writers.FileWriter(filename), 3984 "P"+matrix_element.get('processes')[0].shell_string()) 3985 3986 filename = pjoin(Ppath, 'iproc.dat') 3987 self.write_iproc_file(writers.FortranWriter(filename), 3988 me_number) 3989 3990 filename = pjoin(Ppath, 'leshouche.inc') 3991 self.write_leshouche_file(writers.FortranWriter(filename), 3992 matrix_element) 3993 3994 filename = pjoin(Ppath, 'maxamps.inc') 3995 self.write_maxamps_file(writers.FortranWriter(filename), 3996 len(matrix_element.get('diagrams')), 3997 ncolor, 3998 len(matrix_element.get('processes')), 3999 1) 4000 4001 filename = pjoin(Ppath, 'mg.sym') 4002 self.write_mg_sym_file(writers.FortranWriter(filename), 4003 matrix_element) 4004 4005 filename = pjoin(Ppath, 'ncombs.inc') 4006 self.write_ncombs_file(writers.FortranWriter(filename), 4007 nexternal) 4008 4009 filename = pjoin(Ppath, 'nexternal.inc') 4010 self.write_nexternal_file(writers.FortranWriter(filename), 4011 nexternal, ninitial) 4012 4013 filename = pjoin(Ppath, 'ngraphs.inc') 4014 self.write_ngraphs_file(writers.FortranWriter(filename), 4015 len(mapconfigs)) 4016 4017 4018 filename = pjoin(Ppath, 'pmass.inc') 4019 self.write_pmass_file(writers.FortranWriter(filename), 4020 matrix_element) 4021 4022 filename = pjoin(Ppath, 'props.inc') 4023 self.write_props_file(writers.FortranWriter(filename), 4024 matrix_element, 4025 s_and_t_channels) 4026 4027 # Find config symmetries and permutations 4028 symmetry, perms, ident_perms = \ 4029 diagram_symmetry.find_symmetry(matrix_element) 4030 4031 filename = pjoin(Ppath, 'symswap.inc') 4032 self.write_symswap_file(writers.FortranWriter(filename), 4033 ident_perms) 4034 4035 filename = pjoin(Ppath, 'symfact_orig.dat') 4036 self.write_symfact_file(open(filename, 'w'), symmetry) 4037 4038 # Generate diagrams 4039 if not 'noeps' in self.opt['output_options'] or self.opt['output_options']['noeps'] != 'True': 4040 filename = pjoin(Ppath, "matrix.ps") 4041 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\ 4042 get('diagrams'), 4043 filename, 4044 model=matrix_element.get('processes')[0].\ 4045 get('model'), 4046 amplitude=True) 4047 logger.info("Generating Feynman diagrams for " + \ 4048 matrix_element.get('processes')[0].nice_string()) 4049 plot.draw() 4050 4051 self.link_files_in_SubProcess(Ppath) 4052 4053 #import nexternal/leshouche in Source 4054 ln(pjoin(Ppath,'nexternal.inc'), pjoin(self.dir_path,'Source'), log=False) 4055 ln(pjoin(Ppath,'leshouche.inc'), pjoin(self.dir_path,'Source'), log=False) 4056 ln(pjoin(Ppath,'maxamps.inc'), pjoin(self.dir_path,'Source'), log=False) 4057 # Return to SubProcesses dir 4058 #os.chdir(os.path.pardir) 4059 4060 # Add subprocess to subproc.mg 4061 filename = pjoin(path, 'subproc.mg') 4062 files.append_to_file(filename, 4063 self.write_subproc, 4064 subprocdir) 4065 4066 # Return to original dir 4067 #os.chdir(cwd) 4068 4069 # Generate info page 4070 gen_infohtml.make_info_html(self.dir_path) 4071 4072 4073 if not calls: 4074 calls = 0 4075 return calls
4076 4077 link_Sub_files = ['addmothers.f', 4078 'cluster.f', 4079 'cluster.inc', 4080 'coupl.inc', 4081 'cuts.f', 4082 'cuts.inc', 4083 'genps.f', 4084 'genps.inc', 4085 'idenparts.f', 4086 'initcluster.f', 4087 'makefile', 4088 'message.inc', 4089 'myamp.f', 4090 'reweight.f', 4091 'run.inc', 4092 'maxconfigs.inc', 4093 'maxparticles.inc', 4094 'run_config.inc', 4095 'lhe_event_infos.inc', 4096 'setcuts.f', 4097 'setscales.f', 4098 'sudakov.inc', 4099 'symmetry.f', 4100 'unwgt.f', 4101 'dummy_fct.f' 4102 ] 4103 4117 4118
4119 - def finalize(self, matrix_elements, history, mg5options, flaglist):
4120 """Finalize ME v4 directory by creating jpeg diagrams, html 4121 pages,proc_card_mg5.dat and madevent.tar.gz.""" 4122 4123 if 'nojpeg' in flaglist: 4124 makejpg = False 4125 else: 4126 makejpg = True 4127 if 'online' in flaglist: 4128 online = True 4129 else: 4130 online = False 4131 4132 compiler = {'fortran': mg5options['fortran_compiler'], 4133 'cpp': mg5options['cpp_compiler'], 4134 'f2py': mg5options['f2py_compiler']} 4135 4136 # indicate that the output type is not grouped 4137 if not isinstance(self, ProcessExporterFortranMEGroup): 4138 self.proc_characteristic['grouped_matrix'] = False 4139 self.proc_characteristic['nlo_mixed_expansion'] = mg5options['nlo_mixed_expansion'] 4140 4141 self.proc_characteristic['complex_mass_scheme'] = mg5options['complex_mass_scheme'] 4142 4143 # set limitation linked to the model 4144 4145 4146 # indicate the PDG of all initial particle 4147 try: 4148 pdgs1 = [p.get_initial_pdg(1) for me in matrix_elements for m in me.get('matrix_elements') for p in m.get('processes') if p.get_initial_pdg(1)] 4149 pdgs2 = [p.get_initial_pdg(2) for me in matrix_elements for m in me.get('matrix_elements') for p in m.get('processes') if p.get_initial_pdg(2)] 4150 except AttributeError: 4151 pdgs1 = [p.get_initial_pdg(1) for m in matrix_elements.get('matrix_elements') for p in m.get('processes') if p.get_initial_pdg(1)] 4152 pdgs2 = [p.get_initial_pdg(2) for m in matrix_elements.get('matrix_elements') for p in m.get('processes') if p.get_initial_pdg(2)] 4153 self.proc_characteristic['pdg_initial1'] = pdgs1 4154 self.proc_characteristic['pdg_initial2'] = pdgs2 4155 4156 4157 modelname = self.opt['model'] 4158 if modelname == 'mssm' or modelname.startswith('mssm-'): 4159 param_card = pjoin(self.dir_path, 'Cards','param_card.dat') 4160 mg5_param = pjoin(self.dir_path, 'Source', 'MODEL', 'MG5_param.dat') 4161 check_param_card.convert_to_mg5card(param_card, mg5_param) 4162 check_param_card.check_valid_param_card(mg5_param) 4163 4164 # Add the combine_events.f modify param_card path/number of @X 4165 filename = pjoin(self.dir_path,'Source','combine_events.f') 4166 try: 4167 nb_proc =[p.get('id') for me in matrix_elements for m in me.get('matrix_elements') for p in m.get('processes')] 4168 except AttributeError: 4169 nb_proc =[p.get('id') for m in matrix_elements.get('matrix_elements') for p in m.get('processes')] 4170 nb_proc = len(set(nb_proc)) 4171 self.write_combine_events(writers.FortranWriter(filename), nb_proc) # already formatted 4172 # Write maxconfigs.inc based on max of ME's/subprocess groups 4173 filename = pjoin(self.dir_path,'Source','maxconfigs.inc') 4174 self.write_maxconfigs_file(writers.FortranWriter(filename), 4175 matrix_elements) 4176 4177 # Write maxparticles.inc based on max of ME's/subprocess groups 4178 filename = pjoin(self.dir_path,'Source','maxparticles.inc') 4179 self.write_maxparticles_file(writers.FortranWriter(filename), 4180 matrix_elements) 4181 4182 # Touch "done" file 4183 os.system('touch %s/done' % pjoin(self.dir_path,'SubProcesses')) 4184 4185 # Check for compiler 4186 self.set_compiler(compiler) 4187 self.set_cpp_compiler(compiler['cpp']) 4188 4189 4190 old_pos = os.getcwd() 4191 subpath = pjoin(self.dir_path, 'SubProcesses') 4192 4193 P_dir_list = [proc for proc in os.listdir(subpath) 4194 if os.path.isdir(pjoin(subpath,proc)) and proc[0] == 'P'] 4195 4196 devnull = os.open(os.devnull, os.O_RDWR) 4197 # Convert the poscript in jpg files (if authorize) 4198 if makejpg: 4199 try: 4200 os.remove(pjoin(self.dir_path,'HTML','card.jpg')) 4201 except Exception as error: 4202 pass 4203 4204 if misc.which('gs'): 4205 logger.info("Generate jpeg diagrams") 4206 for Pdir in P_dir_list: 4207 misc.call([pjoin(self.dir_path, 'bin', 'internal', 'gen_jpeg-pl')], 4208 stdout = devnull, cwd=pjoin(subpath, Pdir)) 4209 4210 logger.info("Generate web pages") 4211 # Create the WebPage using perl script 4212 4213 misc.call([pjoin(self.dir_path, 'bin', 'internal', 'gen_cardhtml-pl')], \ 4214 stdout = devnull,cwd=pjoin(self.dir_path)) 4215 4216 #os.chdir(os.path.pardir) 4217 4218 obj = gen_infohtml.make_info_html(self.dir_path) 4219 4220 if online: 4221 nb_channel = obj.rep_rule['nb_gen_diag'] 4222 open(pjoin(self.dir_path, 'Online'),'w').write(str(nb_channel)) 4223 #add the information to proc_charac 4224 self.proc_characteristic['nb_channel'] = obj.rep_rule['nb_gen_diag'] 4225 4226 # Write command history as proc_card_mg5 4227 if os.path.isdir(pjoin(self.dir_path,'Cards')): 4228 output_file = pjoin(self.dir_path,'Cards', 'proc_card_mg5.dat') 4229 history.write(output_file) 4230 4231 misc.call([pjoin(self.dir_path, 'bin', 'internal', 'gen_cardhtml-pl')], 4232 stdout = devnull) 4233 4234 #crate the proc_characteristic file 4235 self.create_proc_charac(matrix_elements, history) 4236 4237 # create the run_card 4238 ProcessExporterFortran.finalize(self, matrix_elements, history, mg5options, flaglist) 4239 4240 # Run "make" to generate madevent.tar.gz file 4241 if os.path.exists(pjoin(self.dir_path,'SubProcesses', 'subproc.mg')): 4242 if os.path.exists(pjoin(self.dir_path,'madevent.tar.gz')): 4243 os.remove(pjoin(self.dir_path,'madevent.tar.gz')) 4244 misc.call([os.path.join(self.dir_path, 'bin', 'internal', 'make_madevent_tar')], 4245 stdout = devnull, cwd=self.dir_path) 4246 4247 misc.call([pjoin(self.dir_path, 'bin', 'internal', 'gen_cardhtml-pl')], 4248 stdout = devnull, cwd=self.dir_path)
4249 4250 4251 4252 4253 4254 4255 #return to the initial dir 4256 #os.chdir(old_pos) 4257 4258 #=========================================================================== 4259 # write_matrix_element_v4 4260 #===========================================================================
4261 - def write_matrix_element_v4(self, writer, matrix_element, fortran_model, 4262 proc_id = "", config_map = [], subproc_number = ""):
4263 """Export a matrix element to a matrix.f file in MG4 madevent format""" 4264 4265 if not matrix_element.get('processes') or \ 4266 not matrix_element.get('diagrams'): 4267 return 0 4268 4269 if writer: 4270 if not isinstance(writer, writers.FortranWriter): 4271 raise writers.FortranWriter.FortranWriterError(\ 4272 "writer not FortranWriter") 4273 # Set lowercase/uppercase Fortran code 4274 writers.FortranWriter.downcase = False 4275 4276 # check if MLM/.../ is supported for this matrix-element and update associate flag 4277 if self.model and 'MLM' in self.model["limitations"]: 4278 if 'MLM' not in self.proc_characteristic["limitations"]: 4279 used_couplings = matrix_element.get_used_couplings(output="set") 4280 for vertex in self.model.get('interactions'): 4281 particles = [p for p in vertex.get('particles')] 4282 if 21 in [p.get('pdg_code') for p in particles]: 4283 colors = [par.get('color') for par in particles] 4284 if 1 in colors: 4285 continue 4286 elif 'QCD' not in vertex.get('orders'): 4287 for bad_coup in vertex.get('couplings').values(): 4288 if bad_coup in used_couplings: 4289 self.proc_characteristic["limitations"].append('MLM') 4290 break 4291 4292 # The proc prefix is not used for MadEvent output so it can safely be set 4293 # to an empty string. 4294 replace_dict = {'proc_prefix':''} 4295 4296 4297 # Extract helas calls 4298 helas_calls = fortran_model.get_matrix_element_calls(\ 4299 matrix_element) 4300 if fortran_model.width_tchannel_set_tozero and not ProcessExporterFortranME.done_warning_tchannel: 4301 logger.info("Some T-channel width have been set to zero [new since 2.8.0]\n if you want to keep this width please set \"zerowidth_tchannel\" to False", '$MG:BOLD') 4302 ProcessExporterFortranME.done_warning_tchannel = True 4303 4304 replace_dict['helas_calls'] = "\n".join(helas_calls) 4305 4306 4307 #adding the support for the fake width (forbidding too small width) 4308 mass_width = matrix_element.get_all_mass_widths() 4309 mass_width = sorted(list(mass_width)) 4310 width_list = set([e[1] for e in mass_width]) 4311 4312 replace_dict['fake_width_declaration'] = \ 4313 (' double precision fk_%s \n' * len(width_list)) % tuple(width_list) 4314 replace_dict['fake_width_declaration'] += \ 4315 (' save fk_%s \n' * len(width_list)) % tuple(width_list) 4316 fk_w_defs = [] 4317 one_def = ' IF(%(w)s.ne.0d0) fk_%(w)s = SIGN(MAX(ABS(%(w)s), ABS(%(m)s*small_width_treatment)), %(w)s)' 4318 for m, w in mass_width: 4319 if w == 'zero': 4320 if ' fk_zero = 0d0' not in fk_w_defs: 4321 fk_w_defs.append(' fk_zero = 0d0') 4322 continue 4323 fk_w_defs.append(one_def %{'m':m, 'w':w}) 4324 replace_dict['fake_width_definitions'] = '\n'.join(fk_w_defs) 4325 4326 # Extract version number and date from VERSION file 4327 info_lines = self.get_mg5_info_lines() 4328 replace_dict['info_lines'] = info_lines 4329 4330 # Extract process info lines 4331 process_lines = self.get_process_info_lines(matrix_element) 4332 replace_dict['process_lines'] = process_lines 4333 4334 # Set proc_id 4335 replace_dict['proc_id'] = proc_id 4336 4337 # Extract ncomb 4338 ncomb = matrix_element.get_helicity_combinations() 4339 replace_dict['ncomb'] = ncomb 4340 4341 # Extract helicity lines 4342 helicity_lines = self.get_helicity_lines(matrix_element) 4343 replace_dict['helicity_lines'] = helicity_lines 4344 4345 # Extract IC line 4346 ic_line = self.get_ic_line(matrix_element) 4347 replace_dict['ic_line'] = ic_line 4348 4349 # Extract overall denominator 4350 # Averaging initial state color, spin, and identical FS particles 4351 den_factor_line = self.get_den_factor_line(matrix_element) 4352 replace_dict['den_factor_line'] = den_factor_line 4353 4354 # Extract ngraphs 4355 ngraphs = matrix_element.get_number_of_amplitudes() 4356 replace_dict['ngraphs'] = ngraphs 4357 4358 # Extract ndiags 4359 ndiags = len(matrix_element.get('diagrams')) 4360 replace_dict['ndiags'] = ndiags 4361 4362 # Set define_iconfigs_lines 4363 replace_dict['define_iconfigs_lines'] = \ 4364 """INTEGER MAPCONFIG(0:LMAXCONFIGS), ICONFIG 4365 COMMON/TO_MCONFIGS/MAPCONFIG, ICONFIG""" 4366 4367 if proc_id: 4368 # Set lines for subprocess group version 4369 # Set define_iconfigs_lines 4370 replace_dict['define_iconfigs_lines'] += \ 4371 """\nINTEGER SUBDIAG(MAXSPROC),IB(2) 4372 COMMON/TO_SUB_DIAG/SUBDIAG,IB""" 4373 # Set set_amp2_line 4374 replace_dict['set_amp2_line'] = "ANS=ANS*AMP2(SUBDIAG(%s))/XTOT" % \ 4375 proc_id 4376 else: 4377 # Standard running 4378 # Set set_amp2_line 4379 replace_dict['set_amp2_line'] = "ANS=ANS*AMP2(MAPCONFIG(ICONFIG))/XTOT" 4380 4381 # Extract nwavefuncs 4382 nwavefuncs = matrix_element.get_number_of_wavefunctions() 4383 replace_dict['nwavefuncs'] = nwavefuncs 4384 4385 # Extract ncolor 4386 ncolor = max(1, len(matrix_element.get('color_basis'))) 4387 replace_dict['ncolor'] = ncolor 4388 4389 # Extract color data lines 4390 color_data_lines = self.get_color_data_lines(matrix_element) 4391 replace_dict['color_data_lines'] = "\n".join(color_data_lines) 4392 4393 4394 # Set the size of Wavefunction 4395 if not self.model or any([p.get('spin') in [4,5] for p in self.model.get('particles') if p]): 4396 replace_dict['wavefunctionsize'] = 18 4397 else: 4398 replace_dict['wavefunctionsize'] = 6 4399 4400 # Extract amp2 lines 4401 amp2_lines = self.get_amp2_lines(matrix_element, config_map, replace_dict) 4402 replace_dict['amp2_lines'] = '\n'.join(amp2_lines) 4403 4404 # The JAMP definition depends on the splitting order 4405 split_orders=matrix_element.get('processes')[0].get('split_orders') 4406 if len(split_orders)>0: 4407 squared_orders, amp_orders = matrix_element.get_split_orders_mapping() 4408 replace_dict['chosen_so_configs']=self.set_chosen_SO_index( 4409 matrix_element.get('processes')[0],squared_orders) 4410 replace_dict['select_configs_if'] = ' IF (CHOSEN_SO_CONFIGS(SQSOINDEX%(proc_id)s(M,N))) THEN' % replace_dict 4411 replace_dict['select_configs_endif'] = ' endif' 4412 else: 4413 # Consider the output of a dummy order 'ALL_ORDERS' for which we 4414 # set all amplitude order to weight 1 and only one squared order 4415 # contribution which is of course ALL_ORDERS=2. 4416 squared_orders = [(2,),] 4417 amp_orders = [((1,),tuple(range(1,ngraphs+1)))] 4418 replace_dict['chosen_so_configs'] = '.TRUE.' 4419 # addtionally set the function to NOT be called 4420 replace_dict['select_configs_if'] = '' 4421 replace_dict['select_configs_endif'] = '' 4422 4423 replace_dict['nAmpSplitOrders']=len(amp_orders) 4424 replace_dict['nSqAmpSplitOrders']=len(squared_orders) 4425 replace_dict['split_order_str_list']=str(split_orders) 4426 replace_dict['nSplitOrders']=max(len(split_orders),1) 4427 amp_so = self.get_split_orders_lines( 4428 [amp_order[0] for amp_order in amp_orders],'AMPSPLITORDERS') 4429 sqamp_so = self.get_split_orders_lines(squared_orders,'SQSPLITORDERS') 4430 replace_dict['ampsplitorders']='\n'.join(amp_so) 4431 replace_dict['sqsplitorders']='\n'.join(sqamp_so) 4432 4433 4434 # Extract JAMP lines 4435 # If no split_orders then artificiall add one entry called 'ALL_ORDERS' 4436 jamp_lines, nb_temp = self.get_JAMP_lines_split_order(\ 4437 matrix_element,amp_orders,split_order_names= 4438 split_orders if len(split_orders)>0 else ['ALL_ORDERS']) 4439 replace_dict['jamp_lines'] = '\n'.join(jamp_lines) 4440 replace_dict['nb_temp_jamp'] = nb_temp 4441 4442 replace_dict['template_file'] = pjoin(_file_path, \ 4443 'iolibs/template_files/%s' % self.matrix_file) 4444 replace_dict['template_file2'] = pjoin(_file_path, \ 4445 'iolibs/template_files/split_orders_helping_functions.inc') 4446 4447 s1,s2 = matrix_element.get_spin_state_initial() 4448 replace_dict['nb_spin_state1'] = s1 4449 replace_dict['nb_spin_state2'] = s2 4450 4451 if writer: 4452 file = open(replace_dict['template_file']).read() 4453 file = file % replace_dict 4454 # Add the split orders helper functions. 4455 file = file + '\n' + open(replace_dict['template_file2'])\ 4456 .read()%replace_dict 4457 # Write the file 4458 writer.writelines(file) 4459 return len([call for call in helas_calls if call.find('#') != 0]), ncolor 4460 else: 4461 replace_dict['return_value'] = (len([call for call in helas_calls if call.find('#') != 0]), ncolor) 4462 return replace_dict
4463 4464 #=========================================================================== 4465 # write_auto_dsig_file 4466 #===========================================================================
4467 - def write_auto_dsig_file(self, writer, matrix_element, proc_id = ""):
4468 """Write the auto_dsig.f file for the differential cross section 4469 calculation, includes pdf call information""" 4470 4471 if not matrix_element.get('processes') or \ 4472 not matrix_element.get('diagrams'): 4473 return 0 4474 4475 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 4476 self.proc_characteristic['ninitial'] = ninitial 4477 self.proc_characteristic['nexternal'] = max(self.proc_characteristic['nexternal'], nexternal) 4478 4479 # Add information relevant for MLM matching: 4480 # Maximum QCD power in all the contributions 4481 max_qcd_order = 0 4482 for diag in matrix_element.get('diagrams'): 4483 orders = diag.calculate_orders() 4484 if 'QCD' in orders: 4485 max_qcd_order = max(max_qcd_order,orders['QCD']) 4486 max_n_light_final_partons = max(len([1 for id in proc.get_final_ids() 4487 if proc.get('model').get_particle(id).get('mass')=='ZERO' and 4488 proc.get('model').get_particle(id).get('color')>1]) 4489 for proc in matrix_element.get('processes')) 4490 # Maximum number of final state light jets to be matched 4491 self.proc_characteristic['max_n_matched_jets'] = max( 4492 self.proc_characteristic['max_n_matched_jets'], 4493 min(max_qcd_order,max_n_light_final_partons)) 4494 4495 # List of default pdgs to be considered for the CKKWl merging cut 4496 self.proc_characteristic['colored_pdgs'] = \ 4497 sorted(list(set([abs(p.get('pdg_code')) for p in 4498 matrix_element.get('processes')[0].get('model').get('particles') if 4499 p.get('color')>1]))) 4500 4501 if ninitial < 1 or ninitial > 2: 4502 raise writers.FortranWriter.FortranWriterError("""Need ninitial = 1 or 2 to write auto_dsig file""") 4503 4504 replace_dict = {} 4505 4506 # Extract version number and date from VERSION file 4507 info_lines = self.get_mg5_info_lines() 4508 replace_dict['info_lines'] = info_lines 4509 4510 # Extract process info lines 4511 process_lines = self.get_process_info_lines(matrix_element) 4512 replace_dict['process_lines'] = process_lines 4513 4514 # Set proc_id 4515 replace_dict['proc_id'] = proc_id 4516 replace_dict['numproc'] = 1 4517 4518 # Set dsig_line 4519 if ninitial == 1: 4520 # No conversion, since result of decay should be given in GeV 4521 dsig_line = "pd(0)*dsiguu" 4522 else: 4523 # Convert result (in GeV) to pb 4524 dsig_line = "pd(0)*conv*dsiguu" 4525 4526 replace_dict['dsig_line'] = dsig_line 4527 4528 # Extract pdf lines 4529 pdf_vars, pdf_data, pdf_lines = \ 4530 self.get_pdf_lines(matrix_element, ninitial, proc_id != "") 4531 replace_dict['pdf_vars'] = pdf_vars 4532 replace_dict['pdf_data'] = pdf_data 4533 replace_dict['pdf_lines'] = pdf_lines 4534 4535 # Lines that differ between subprocess group and regular 4536 if proc_id: 4537 replace_dict['numproc'] = int(proc_id) 4538 replace_dict['passcuts_begin'] = "" 4539 replace_dict['passcuts_end'] = "" 4540 # Set lines for subprocess group version 4541 # Set define_iconfigs_lines 4542 replace_dict['define_subdiag_lines'] = \ 4543 """\nINTEGER SUBDIAG(MAXSPROC),IB(2) 4544 COMMON/TO_SUB_DIAG/SUBDIAG,IB""" 4545 replace_dict['cutsdone'] = "" 4546 else: 4547 replace_dict['passcuts_begin'] = "IF (PASSCUTS(PP)) THEN" 4548 replace_dict['passcuts_end'] = "ENDIF" 4549 replace_dict['define_subdiag_lines'] = "" 4550 replace_dict['cutsdone'] = " cutsdone=.false.\n cutspassed=.false." 4551 4552 if not isinstance(self, ProcessExporterFortranMEGroup): 4553 ncomb=matrix_element.get_helicity_combinations() 4554 replace_dict['read_write_good_hel'] = self.read_write_good_hel(ncomb) 4555 else: 4556 replace_dict['read_write_good_hel'] = "" 4557 4558 context = {'read_write_good_hel':True} 4559 4560 if writer: 4561 file = open(pjoin(_file_path, \ 4562 'iolibs/template_files/auto_dsig_v4.inc')).read() 4563 file = file % replace_dict 4564 4565 # Write the file 4566 writer.writelines(file, context=context) 4567 else: 4568 return replace_dict, context
4569 #=========================================================================== 4570 # write_coloramps_file 4571 #===========================================================================
4572 - def write_coloramps_file(self, writer, mapconfigs, matrix_element):
4573 """Write the coloramps.inc file for MadEvent""" 4574 4575 lines = self.get_icolamp_lines(mapconfigs, matrix_element, 1) 4576 lines.insert(0, "logical icolamp(%d,%d,1)" % \ 4577 (max(len(list(matrix_element.get('color_basis').keys())), 1), 4578 len(mapconfigs))) 4579 4580 4581 # Write the file 4582 writer.writelines(lines) 4583 4584 return True
4585 4586 #=========================================================================== 4587 # write_colors_file 4588 #===========================================================================
4589 - def write_colors_file(self, writer, matrix_elements):
4590 """Write the get_color.f file for MadEvent, which returns color 4591 for all particles used in the matrix element.""" 4592 4593 if isinstance(matrix_elements, helas_objects.HelasMatrixElement): 4594 matrix_elements = [matrix_elements] 4595 4596 model = matrix_elements[0].get('processes')[0].get('model') 4597 4598 # We need the both particle and antiparticle wf_ids, since the identity 4599 # depends on the direction of the wf. 4600 wf_ids = set(sum([sum([sum([[wf.get_pdg_code(),wf.get_anti_pdg_code()] \ 4601 for wf in d.get('wavefunctions')],[]) \ 4602 for d in me.get('diagrams')], []) \ 4603 for me in matrix_elements], [])) 4604 4605 leg_ids = set(sum([sum([sum([[l.get('id'), 4606 model.get_particle(l.get('id')).get_anti_pdg_code()] \ 4607 for l in p.get_legs_with_decays()], []) \ 4608 for p in me.get('processes')], []) \ 4609 for me in matrix_elements], [])) 4610 particle_ids = sorted(list(wf_ids.union(leg_ids))) 4611 4612 lines = """function get_color(ipdg) 4613 implicit none 4614 integer get_color, ipdg 4615 4616 if(ipdg.eq.%d)then 4617 get_color=%d 4618 return 4619 """ % (particle_ids[0], model.get_particle(particle_ids[0]).get_color()) 4620 4621 for part_id in particle_ids[1:]: 4622 lines += """else if(ipdg.eq.%d)then 4623 get_color=%d 4624 return 4625 """ % (part_id, model.get_particle(part_id).get_color()) 4626 # Dummy particle for multiparticle vertices with pdg given by 4627 # first code not in the model 4628 lines += """else if(ipdg.eq.%d)then 4629 c This is dummy particle used in multiparticle vertices 4630 get_color=2 4631 return 4632 """ % model.get_first_non_pdg() 4633 lines += """else 4634 write(*,*)'Error: No color given for pdg ',ipdg 4635 get_color=0 4636 return 4637 endif 4638 end 4639 """ 4640 4641 # Write the file 4642 writer.writelines(lines) 4643 4644 return True
4645 4646 #=========================================================================== 4647 # write_config_nqcd_file 4648 #===========================================================================
4649 - def write_config_nqcd_file(self, writer, nqcd_list):
4650 """Write the config_nqcd.inc with the number of QCD couplings 4651 for each config""" 4652 4653 lines = [] 4654 for iconf, n in enumerate(nqcd_list): 4655 lines.append("data nqcd(%d)/%d/" % (iconf+1, n)) 4656 4657 # Write the file 4658 writer.writelines(lines) 4659 4660 return True
4661 4662 #=========================================================================== 4663 # write_maxconfigs_file 4664 #===========================================================================
4665 - def write_maxconfigs_file(self, writer, matrix_elements):
4666 """Write the maxconfigs.inc file for MadEvent""" 4667 4668 if isinstance(matrix_elements, helas_objects.HelasMultiProcess): 4669 maxconfigs = max([me.get_num_configs() for me in \ 4670 matrix_elements.get('matrix_elements')]) 4671 else: 4672 maxconfigs = max([me.get_num_configs() for me in matrix_elements]) 4673 4674 lines = "integer lmaxconfigs\n" 4675 lines += "parameter(lmaxconfigs=%d)" % maxconfigs 4676 4677 # Write the file 4678 writer.writelines(lines) 4679 4680 return True
4681 4682 #=========================================================================== 4683 # read_write_good_hel 4684 #===========================================================================
4685 - def read_write_good_hel(self, ncomb):
4686 """return the code to read/write the good_hel common_block""" 4687 4688 convert = {'ncomb' : ncomb} 4689 output = """ 4690 subroutine write_good_hel(stream_id) 4691 implicit none 4692 integer stream_id 4693 INTEGER NCOMB 4694 PARAMETER ( NCOMB=%(ncomb)d) 4695 LOGICAL GOODHEL(NCOMB) 4696 INTEGER NTRY 4697 common/BLOCK_GOODHEL/NTRY,GOODHEL 4698 write(stream_id,*) GOODHEL 4699 return 4700 end 4701 4702 4703 subroutine read_good_hel(stream_id) 4704 implicit none 4705 include 'genps.inc' 4706 integer stream_id 4707 INTEGER NCOMB 4708 PARAMETER ( NCOMB=%(ncomb)d) 4709 LOGICAL GOODHEL(NCOMB) 4710 INTEGER NTRY 4711 common/BLOCK_GOODHEL/NTRY,GOODHEL 4712 read(stream_id,*) GOODHEL 4713 NTRY = MAXTRIES + 1 4714 return 4715 end 4716 4717 subroutine init_good_hel() 4718 implicit none 4719 INTEGER NCOMB 4720 PARAMETER ( NCOMB=%(ncomb)d) 4721 LOGICAL GOODHEL(NCOMB) 4722 INTEGER NTRY 4723 INTEGER I 4724 4725 do i=1,NCOMB 4726 GOODHEL(I) = .false. 4727 enddo 4728 NTRY = 0 4729 end 4730 4731 integer function get_maxsproc() 4732 implicit none 4733 get_maxsproc = 1 4734 return 4735 end 4736 4737 """ % convert 4738 4739 return output
4740 4741 #=========================================================================== 4742 # write_config_subproc_map_file 4743 #===========================================================================
4744 - def write_config_subproc_map_file(self, writer, s_and_t_channels):
4745 """Write a dummy config_subproc.inc file for MadEvent""" 4746 4747 lines = [] 4748 4749 for iconfig in range(len(s_and_t_channels)): 4750 lines.append("DATA CONFSUB(1,%d)/1/" % \ 4751 (iconfig + 1)) 4752 4753 # Write the file 4754 writer.writelines(lines) 4755 4756 return True
4757 4758 #=========================================================================== 4759 # write_configs_file 4760 #===========================================================================
4761 - def write_configs_file(self, writer, matrix_element):
4762 """Write the configs.inc file for MadEvent""" 4763 4764 # Extract number of external particles 4765 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 4766 4767 model = matrix_element.get('processes')[0].get('model') 4768 configs = [(i+1, d) for (i, d) in \ 4769 enumerate(matrix_element.get('diagrams'))] 4770 mapconfigs = [c[0] for c in configs] 4771 return mapconfigs, self.write_configs_file_from_diagrams(writer, 4772 [[c[1]] for c in configs], 4773 mapconfigs, 4774 nexternal, ninitial, 4775 model)
4776 4777 #=========================================================================== 4778 # write_run_configs_file 4779 #===========================================================================
4780 - def write_run_config_file(self, writer):
4781 """Write the run_configs.inc file for MadEvent""" 4782 4783 path = pjoin(_file_path,'iolibs','template_files','madevent_run_config.inc') 4784 4785 if self.proc_characteristic['loop_induced']: 4786 job_per_chan = 1 4787 else: 4788 job_per_chan = 5 4789 4790 if writer: 4791 text = open(path).read() % {'chanperjob': job_per_chan} 4792 writer.write(text) 4793 return True 4794 else: 4795 return {'chanperjob': job_per_chan}
4796 4797 #=========================================================================== 4798 # write_configs_file_from_diagrams 4799 #===========================================================================
4800 - def write_configs_file_from_diagrams(self, writer, configs, mapconfigs, 4801 nexternal, ninitial, model):
4802 """Write the actual configs.inc file. 4803 4804 configs is the diagrams corresponding to configs (each 4805 diagrams is a list of corresponding diagrams for all 4806 subprocesses, with None if there is no corresponding diagrams 4807 for a given process). 4808 mapconfigs gives the diagram number for each config. 4809 4810 For s-channels, we need to output one PDG for each subprocess in 4811 the subprocess group, in order to be able to pick the right 4812 one for multiprocesses.""" 4813 4814 lines = [] 4815 4816 s_and_t_channels = [] 4817 4818 nqcd_list = [] 4819 4820 vert_list = [max([d for d in config if d][0].get_vertex_leg_numbers()) \ 4821 for config in configs if [d for d in config if d][0].\ 4822 get_vertex_leg_numbers()!=[]] 4823 minvert = min(vert_list) if vert_list!=[] else 0 4824 4825 # Number of subprocesses 4826 nsubprocs = len(configs[0]) 4827 4828 nconfigs = 0 4829 4830 new_pdg = model.get_first_non_pdg() 4831 4832 for iconfig, helas_diags in enumerate(configs): 4833 if any([vert > minvert for vert in 4834 [d for d in helas_diags if d][0].get_vertex_leg_numbers()]): 4835 # Only 3-vertices allowed in configs.inc 4836 continue 4837 nconfigs += 1 4838 4839 # Need s- and t-channels for all subprocesses, including 4840 # those that don't contribute to this config 4841 empty_verts = [] 4842 stchannels = [] 4843 for h in helas_diags: 4844 if h: 4845 # get_s_and_t_channels gives vertices starting from 4846 # final state external particles and working inwards 4847 stchannels.append(h.get('amplitudes')[0].\ 4848 get_s_and_t_channels(ninitial, model, 4849 new_pdg)) 4850 else: 4851 stchannels.append((empty_verts, None)) 4852 4853 4854 # For t-channels, just need the first non-empty one 4855 tchannels = [t for s,t in stchannels if t != None][0] 4856 4857 # pass to ping-pong strategy for t-channel for 3 ore more T-channel 4858 # this is directly related to change in genps.f 4859 tstrat = self.opt.get('t_strategy', 0) 4860 tchannels, tchannels_strategy = ProcessExporterFortranME.reorder_tchannels(tchannels, tstrat, self.model) 4861 4862 # For s_and_t_channels (to be used later) use only first config 4863 s_and_t_channels.append([[s for s,t in stchannels if t != None][0], 4864 tchannels, tchannels_strategy]) 4865 4866 # Make sure empty_verts is same length as real vertices 4867 if any([s for s,t in stchannels]): 4868 empty_verts[:] = [None]*max([len(s) for s,t in stchannels]) 4869 4870 # Reorganize s-channel vertices to get a list of all 4871 # subprocesses for each vertex 4872 schannels = list(zip(*[s for s,t in stchannels])) 4873 else: 4874 schannels = [] 4875 4876 allchannels = schannels 4877 if len(tchannels) > 1: 4878 # Write out tchannels only if there are any non-trivial ones 4879 allchannels = schannels + tchannels 4880 4881 # Write out propagators for s-channel and t-channel vertices 4882 4883 lines.append("# Diagram %d" % (mapconfigs[iconfig])) 4884 # Correspondance between the config and the diagram = amp2 4885 lines.append("data mapconfig(%d)/%d/" % (nconfigs, 4886 mapconfigs[iconfig])) 4887 lines.append("data tstrategy(%d)/%d/" % (nconfigs, tchannels_strategy)) 4888 # Number of QCD couplings in this diagram 4889 nqcd = 0 4890 for h in helas_diags: 4891 if h: 4892 try: 4893 nqcd = h.calculate_orders()['QCD'] 4894 except KeyError: 4895 pass 4896 break 4897 else: 4898 continue 4899 4900 nqcd_list.append(nqcd) 4901 4902 for verts in allchannels: 4903 if verts in schannels: 4904 vert = [v for v in verts if v][0] 4905 else: 4906 vert = verts 4907 daughters = [leg.get('number') for leg in vert.get('legs')[:-1]] 4908 last_leg = vert.get('legs')[-1] 4909 lines.append("data (iforest(i,%d,%d),i=1,%d)/%s/" % \ 4910 (last_leg.get('number'), nconfigs, len(daughters), 4911 ",".join([str(d) for d in daughters]))) 4912 if verts in schannels: 4913 pdgs = [] 4914 for v in verts: 4915 if v: 4916 pdgs.append(v.get('legs')[-1].get('id')) 4917 else: 4918 pdgs.append(0) 4919 lines.append("data (sprop(i,%d,%d),i=1,%d)/%s/" % \ 4920 (last_leg.get('number'), nconfigs, nsubprocs, 4921 ",".join([str(d) for d in pdgs]))) 4922 lines.append("data tprid(%d,%d)/0/" % \ 4923 (last_leg.get('number'), nconfigs)) 4924 elif verts in tchannels[:-1]: 4925 lines.append("data tprid(%d,%d)/%d/" % \ 4926 (last_leg.get('number'), nconfigs, 4927 abs(last_leg.get('id')))) 4928 lines.append("data (sprop(i,%d,%d),i=1,%d)/%s/" % \ 4929 (last_leg.get('number'), nconfigs, nsubprocs, 4930 ",".join(['0'] * nsubprocs))) 4931 4932 # Write out number of configs 4933 lines.append("# Number of configs") 4934 lines.append("data mapconfig(0)/%d/" % nconfigs) 4935 4936 # Write the file 4937 writer.writelines(lines) 4938 4939 return s_and_t_channels, nqcd_list
4940 4941 4942 4943 #=========================================================================== 4944 # reoder t-channels 4945 #=========================================================================== 4946 4947 #ordering = 0 4948 @staticmethod
4949 - def reorder_tchannels(tchannels, tstrat, model):
4950 # no need to modified anything if 1 or less T-Channel 4951 #Note that this counts the number of vertex (one more vertex compare to T) 4952 #ProcessExporterFortranME.ordering +=1 4953 if len(tchannels) < 3 or tstrat == 2 or not model: 4954 return tchannels, 2 4955 elif tstrat == 1: 4956 return ProcessExporterFortranME.reorder_tchannels_flipside(tchannels), 1 4957 elif tstrat == -2: 4958 return ProcessExporterFortranME.reorder_tchannels_pingpong(tchannels), -2 4959 elif tstrat == -1: 4960 return ProcessExporterFortranME.reorder_tchannels_pingpong(tchannels, 1), -1 4961 elif len(tchannels) < 4: 4962 # 4963 first = tchannels[0]['legs'][1]['number'] 4964 t1 = tchannels[0]['legs'][-1]['id'] 4965 last = tchannels[-1]['legs'][1]['number'] 4966 t2 = tchannels[-1]['legs'][0]['id'] 4967 m1 = model.get_particle(t1).get('mass') == 'ZERO' 4968 m2 = model.get_particle(t2).get('mass') == 'ZERO' 4969 if m2 and not m1: 4970 return ProcessExporterFortranME.reorder_tchannels_flipside(tchannels), 1 4971 elif m1 and not m2: 4972 return tchannels, 2 4973 elif first < last: 4974 return ProcessExporterFortranME.reorder_tchannels_flipside(tchannels), 1 4975 else: 4976 return tchannels, 2 4977 else: 4978 first = tchannels[0]['legs'][1]['number'] 4979 t1 = tchannels[0]['legs'][-1]['id'] 4980 last = tchannels[-1]['legs'][1]['number'] 4981 t2 = tchannels[-1]['legs'][0]['id'] 4982 m1 = model.get_particle(t1).get('mass') == 'ZERO' 4983 m2 = model.get_particle(t2).get('mass') == 'ZERO' 4984 4985 t12 = tchannels[1]['legs'][-1]['id'] 4986 m12 = model.get_particle(t12).get('mass') == 'ZERO' 4987 t22 = tchannels[-2]['legs'][0]['id'] 4988 m22 = model.get_particle(t22).get('mass') == 'ZERO' 4989 if m2 and not m1: 4990 if m22: 4991 return ProcessExporterFortranME.reorder_tchannels_flipside(tchannels), 1 4992 else: 4993 return ProcessExporterFortranME.reorder_tchannels_pingpong(tchannels), -2 4994 elif m1 and not m2: 4995 if m12: 4996 return tchannels, 2 4997 else: 4998 return ProcessExporterFortranME.reorder_tchannels_pingpong(tchannels), -2 4999 elif m1 and m2 and len(tchannels) == 4 and not m12: # 3 T propa 5000 return ProcessExporterFortranME.reorder_tchannels_pingpong(tchannels), -2 5001 # this case seems quite sensitive we tested method 2 specifically and this was not helping in general 5002 elif not m1 and not m2 and len(tchannels) == 4 and m12: 5003 if first < last: 5004 return ProcessExporterFortranME.reorder_tchannels_flipside(tchannels), 1 5005 return tchannels, 2 5006 else: 5007 return ProcessExporterFortranME.reorder_tchannels_pingpong(tchannels), -2
5008 5009 5010 5011 5012 @staticmethod
5013 - def reorder_tchannels_flipside(tchannels):
5014 """change the tchannel ordering to pass to a ping-pong strategy. 5015 assume ninitial == 2 5016 5017 We assume that we receive something like this 5018 5019 1 ----- X ------- -2 5020 | 5021 | (-X) 5022 | 5023 X -------- 4 5024 | 5025 | (-X-1) 5026 | 5027 X --------- -1 5028 5029 X---------- 3 5030 | 5031 | (-N+2) 5032 | 5033 X --------- L 5034 | 5035 | (-N+1) 5036 | 5037 -N ----- X ------- P 5038 5039 coded as 5040 (1 -2 > -X) (-X 4 > -X-1) (-X-1 -1 > -X-2) ... 5041 ((-N+3) 3 > (-N+2)) ((-n+2) L > (-n+1)) ((-n+1) P > -N) 5042 5043 we want to convert this as: 5044 -N ----- X ------- -2 5045 | 5046 | (-N+1) 5047 | 5048 X -------- 4 5049 | 5050 | (-N+2) 5051 | 5052 X --------- -1 5053 5054 X---------- 3 5055 | 5056 | (-X-1) 5057 | 5058 X --------- L 5059 | 5060 | (-X) 5061 | 5062 2 ----- X ------- P 5063 5064 coded as 5065 ( 2 P > -X) (-X L > -X-1) (-X-1 3 > -X-2)... (-X-L -2 > -N) 5066 """ 5067 5068 # no need to modified anything if 1 or less T-Channel 5069 #Note that this counts the number of vertex (one more vertex compare to T) 5070 if len(tchannels) < 2: 5071 return tchannels 5072 5073 out = [] 5074 oldid2new = {} 5075 5076 # initialisation 5077 # id of the first T-channel (-X) 5078 propa_id = tchannels[0]['legs'][-1]['number'] 5079 # 5080 # Setup the last vertex to refenence the second id beam 5081 # -N (need to setup it to 2. 5082 initialid = tchannels[-1]['legs'][-1]['number'] 5083 oldid2new[initialid] = 2 5084 oldid2new[1] = initialid 5085 5086 i = 0 5087 while tchannels: 5088 old_vert = tchannels.pop() 5089 5090 #copy the vertex /leglist to avoid side effects 5091 new_vert = base_objects.Vertex(old_vert) 5092 new_vert['legs'] = base_objects.LegList([base_objects.Leg(l) for l in old_vert['legs']]) 5093 # vertex taken from the bottom we have 5094 # (-N+1 X > -N) we need to flip to pass to 5095 # -N X > -N+1 (and then relabel -N and -N+1 5096 legs = new_vert['legs'] # shorcut 5097 id1 = legs[0]['number'] 5098 id2 = legs[1]['number'] 5099 id3 = legs[2]['number'] 5100 # to be secure we also support (X -N+1 > -N) 5101 if id3 == id2 -1 and id1 !=1: 5102 legs[0], legs[1] = legs[1], legs[0] 5103 #flipping side 5104 legs[0], legs[2] = legs[2], legs[0] 5105 5106 # the only new relabelling is the last element of the list 5107 # always thanks to the above flipping 5108 old_propa_id = new_vert['legs'][-1]['number'] 5109 oldid2new[old_propa_id] = propa_id 5110 5111 5112 #pass to new convention for leg numbering: 5113 for l in new_vert['legs']: 5114 if l['number'] in oldid2new: 5115 l['number'] = oldid2new[l['number']] 5116 5117 # new_vert is now ready 5118 out.append(new_vert) 5119 # prepare next iteration 5120 propa_id -=1 5121 i +=1 5122 5123 return out
5124 5125 @staticmethod
5126 - def reorder_tchannels_pingpong(tchannels, id=2):
5127 """change the tchannel ordering to pass to a ping-pong strategy. 5128 assume ninitial == 2 5129 5130 We assume that we receive something like this 5131 5132 1 ----- X ------- -2 5133 | 5134 | (-X) 5135 | 5136 X -------- 4 5137 | 5138 | (-X-1) 5139 | 5140 X --------- -1 5141 5142 X---------- 3 5143 | 5144 | (-N+2) 5145 | 5146 X --------- L 5147 | 5148 | (-N+1) 5149 | 5150 -N ----- X ------- P 5151 5152 coded as 5153 (1 -2 > -X) (-X 4 > -X-1) (-X-1 -1 > -X-2) ... 5154 ((-N+3) 3 > (-N+2)) ((-n+2) L > (-n+1)) ((-n+1) P > -N) 5155 5156 we want to convert this as: 5157 1 ----- X ------- -2 5158 | 5159 | (-X) 5160 | 5161 X -------- 4 5162 | 5163 | (-X-2) 5164 | 5165 X --------- -1 5166 5167 X---------- 3 5168 | 5169 | (-X-3) 5170 | 5171 X --------- L 5172 | 5173 | (-X-1) 5174 | 5175 2 ----- X ------- P 5176 5177 coded as 5178 (1 -2 > -X) (2 P > -X-1) (-X 4 > -X-2) (-X-1 L > -X-3) ... 5179 """ 5180 5181 # no need to modified anything if 1 or less T-Channel 5182 #Note that this counts the number of vertex (one more vertex compare to T) 5183 if len(tchannels) < 2: 5184 return tchannels 5185 5186 out = [] 5187 oldid2new = {} 5188 5189 # initialisation 5190 # id of the first T-channel (-X) 5191 propa_id = tchannels[0]['legs'][-1]['number'] 5192 # 5193 # Setup the last vertex to refenence the second id beam 5194 # -N (need to setup it to 2. 5195 initialid = tchannels[-1]['legs'][-1]['number'] 5196 oldid2new[initialid] = id 5197 5198 5199 5200 i = 0 5201 while tchannels: 5202 #ping pong by taking first/last element in aternance 5203 if id ==2: 5204 if i % 2 == 0: 5205 old_vert = tchannels.pop(0) 5206 else: 5207 old_vert = tchannels.pop() 5208 else: 5209 if i % 2 != 0: 5210 old_vert = tchannels.pop(0) 5211 else: 5212 old_vert = tchannels.pop() 5213 5214 #copy the vertex /leglist to avoid side effects 5215 new_vert = base_objects.Vertex(old_vert) 5216 new_vert['legs'] = base_objects.LegList([base_objects.Leg(l) for l in old_vert['legs']]) 5217 # if vertex taken from the bottom we have 5218 # (-N+1 X > -N) we need to flip to pass to 5219 # -N X > -N+1 (and then relabel -N and -N+1 5220 # to be secure we also support (X -N+1 > -N) 5221 if (i % 2 ==1 and id ==2) or (i %2 == 0 and id ==1): 5222 legs = new_vert['legs'] # shorcut 5223 id1 = legs[0]['number'] 5224 id2 = legs[1]['number'] 5225 if id1 > id2: 5226 legs[0], legs[1] = legs[1], legs[0] 5227 else: 5228 legs[0], legs[2] = legs[2], legs[0] 5229 5230 # the only new relabelling is the last element of the list 5231 # always thanks to the above flipping 5232 old_propa_id = new_vert['legs'][-1]['number'] 5233 oldid2new[old_propa_id] = propa_id 5234 5235 if i==0 and id==1: 5236 legs[0]['number'] = 2 5237 5238 #pass to new convention for leg numbering: 5239 for l in new_vert['legs']: 5240 if l['number'] in oldid2new: 5241 l['number'] = oldid2new[l['number']] 5242 5243 # new_vert is now ready 5244 out.append(new_vert) 5245 # prepare next iteration 5246 propa_id -=1 5247 i +=1 5248 5249 return out
5250 5251 5252 5253 5254 5255 #=========================================================================== 5256 # write_decayBW_file 5257 #===========================================================================
5258 - def write_decayBW_file(self, writer, s_and_t_channels):
5259 """Write the decayBW.inc file for MadEvent""" 5260 5261 lines = [] 5262 5263 booldict = {None: "0", True: "1", False: "2"} 5264 5265 for iconf, config in enumerate(s_and_t_channels): 5266 schannels = config[0] 5267 for vertex in schannels: 5268 # For the resulting leg, pick out whether it comes from 5269 # decay or not, as given by the onshell flag 5270 leg = vertex.get('legs')[-1] 5271 lines.append("data gForceBW(%d,%d)/%s/" % \ 5272 (leg.get('number'), iconf + 1, 5273 booldict[leg.get('onshell')])) 5274 5275 # Write the file 5276 writer.writelines(lines) 5277 5278 return True
5279 5280 #=========================================================================== 5281 # write_dname_file 5282 #===========================================================================
5283 - def write_dname_file(self, writer, dir_name):
5284 """Write the dname.mg file for MG4""" 5285 5286 line = "DIRNAME=%s" % dir_name 5287 5288 # Write the file 5289 writer.write(line + "\n") 5290 5291 return True
5292 5293 #=========================================================================== 5294 # write_driver 5295 #===========================================================================
5296 - def write_driver(self, writer, ncomb, n_grouped_proc, v5=True):
5297 """Write the SubProcess/driver.f file for MG4""" 5298 5299 path = pjoin(_file_path,'iolibs','template_files','madevent_driver.f') 5300 5301 if self.model_name == 'mssm' or self.model_name.startswith('mssm-'): 5302 card = 'Source/MODEL/MG5_param.dat' 5303 else: 5304 card = 'param_card.dat' 5305 # Requiring each helicity configuration to be probed by 10 points for 5306 # matrix element before using the resulting grid for MC over helicity 5307 # sampling. 5308 # We multiply this by 2 because each grouped subprocess is called at most 5309 # twice for each IMIRROR. 5310 replace_dict = {'param_card_name':card, 5311 'ncomb':ncomb, 5312 'hel_init_points':n_grouped_proc*10*2} 5313 if not v5: 5314 replace_dict['secondparam']=',.true.' 5315 else: 5316 replace_dict['secondparam']='' 5317 5318 if writer: 5319 text = open(path).read() % replace_dict 5320 writer.write(text) 5321 return True 5322 else: 5323 return replace_dict
5324 5325 #=========================================================================== 5326 # write_addmothers 5327 #===========================================================================
5328 - def write_addmothers(self, writer):
5329 """Write the SubProcess/addmothers.f""" 5330 5331 path = pjoin(_file_path,'iolibs','template_files','addmothers.f') 5332 5333 text = open(path).read() % {'iconfig': 'diag_number'} 5334 writer.write(text) 5335 5336 return True
5337 5338 5339 #=========================================================================== 5340 # write_combine_events 5341 #===========================================================================
5342 - def write_combine_events(self, writer, nb_proc=100):
5343 """Write the SubProcess/driver.f file for MG4""" 5344 5345 path = pjoin(_file_path,'iolibs','template_files','madevent_combine_events.f') 5346 5347 if self.model_name == 'mssm' or self.model_name.startswith('mssm-'): 5348 card = 'Source/MODEL/MG5_param.dat' 5349 else: 5350 card = 'param_card.dat' 5351 5352 #set maxpup (number of @X in the process card) 5353 5354 text = open(path).read() % {'param_card_name':card, 'maxpup':nb_proc+1} 5355 #the +1 is just a security. This is not needed but I feel(OM) safer with it. 5356 writer.write(text) 5357 5358 return True
5359 5360 5361 #=========================================================================== 5362 # write_symmetry 5363 #===========================================================================
5364 - def write_symmetry(self, writer, v5=True):
5365 """Write the SubProcess/driver.f file for ME""" 5366 5367 5368 path = pjoin(_file_path,'iolibs','template_files','madevent_symmetry.f') 5369 5370 if self.model_name == 'mssm' or self.model_name.startswith('mssm-'): 5371 card = 'Source/MODEL/MG5_param.dat' 5372 else: 5373 card = 'param_card.dat' 5374 5375 if v5: 5376 replace_dict = {'param_card_name':card, 'setparasecondarg':''} 5377 else: 5378 replace_dict= {'param_card_name':card, 'setparasecondarg':',.true.'} 5379 5380 if writer: 5381 text = open(path).read() 5382 text = text % replace_dict 5383 writer.write(text) 5384 return True 5385 else: 5386 return replace_dict
5387 5388 5389 5390 #=========================================================================== 5391 # write_iproc_file 5392 #===========================================================================
5393 - def write_iproc_file(self, writer, me_number):
5394 """Write the iproc.dat file for MG4""" 5395 line = "%d" % (me_number + 1) 5396 5397 # Write the file 5398 for line_to_write in writer.write_line(line): 5399 writer.write(line_to_write) 5400 return True
5401 5402 #=========================================================================== 5403 # write_mg_sym_file 5404 #===========================================================================
5405 - def write_mg_sym_file(self, writer, matrix_element):
5406 """Write the mg.sym file for MadEvent.""" 5407 5408 lines = [] 5409 5410 # Extract process with all decays included 5411 final_legs = [leg for leg in matrix_element.get('processes')[0].get_legs_with_decays() if leg.get('state') == True] 5412 5413 ninitial = len([leg for leg in matrix_element.get('processes')[0].get('legs') if leg.get('state') == False]) 5414 5415 identical_indices = {} 5416 5417 # Extract identical particle info 5418 for i, leg in enumerate(final_legs): 5419 if leg.get('id') in identical_indices: 5420 identical_indices[leg.get('id')].append(\ 5421 i + ninitial + 1) 5422 else: 5423 identical_indices[leg.get('id')] = [i + ninitial + 1] 5424 5425 # Remove keys which have only one particle 5426 for key in list(identical_indices.keys()): 5427 if len(identical_indices[key]) < 2: 5428 del identical_indices[key] 5429 5430 # Write mg.sym file 5431 lines.append(str(len(list(identical_indices.keys())))) 5432 for key in identical_indices.keys(): 5433 lines.append(str(len(identical_indices[key]))) 5434 for number in identical_indices[key]: 5435 lines.append(str(number)) 5436 5437 # Write the file 5438 writer.writelines(lines) 5439 5440 return True
5441 5442 #=========================================================================== 5443 # write_mg_sym_file 5444 #===========================================================================
5445 - def write_default_mg_sym_file(self, writer):
5446 """Write the mg.sym file for MadEvent.""" 5447 5448 lines = "0" 5449 5450 # Write the file 5451 writer.writelines(lines) 5452 5453 return True
5454 5455 #=========================================================================== 5456 # write_ncombs_file 5457 #===========================================================================
5458 - def write_ncombs_file(self, writer, nexternal):
5459 """Write the ncombs.inc file for MadEvent.""" 5460 5461 # ncomb (used for clustering) is 2^nexternal 5462 file = " integer n_max_cl\n" 5463 file = file + "parameter (n_max_cl=%d)" % (2 ** nexternal) 5464 5465 # Write the file 5466 writer.writelines(file) 5467 5468 return True
5469 5470 #=========================================================================== 5471 # write_processes_file 5472 #===========================================================================
5473 - def write_processes_file(self, writer, subproc_group):
5474 """Write the processes.dat file with info about the subprocesses 5475 in this group.""" 5476 5477 lines = [] 5478 5479 for ime, me in \ 5480 enumerate(subproc_group.get('matrix_elements')): 5481 lines.append("%s %s" % (str(ime+1) + " " * (7-len(str(ime+1))), 5482 ",".join(p.base_string() for p in \ 5483 me.get('processes')))) 5484 if me.get('has_mirror_process'): 5485 mirror_procs = [copy.copy(p) for p in me.get('processes')] 5486 for proc in mirror_procs: 5487 legs = copy.copy(proc.get('legs_with_decays')) 5488 legs.insert(0, legs.pop(1)) 5489 proc.set("legs_with_decays", legs) 5490 lines.append("mirror %s" % ",".join(p.base_string() for p in \ 5491 mirror_procs)) 5492 else: 5493 lines.append("mirror none") 5494 5495 # Write the file 5496 writer.write("\n".join(lines)) 5497 5498 return True
5499 5500 #=========================================================================== 5501 # write_symswap_file 5502 #===========================================================================
5503 - def write_symswap_file(self, writer, ident_perms):
5504 """Write the file symswap.inc for MG4 by comparing diagrams using 5505 the internal matrix element value functionality.""" 5506 5507 lines = [] 5508 5509 # Write out lines for symswap.inc file (used to permute the 5510 # external leg momenta 5511 for iperm, perm in enumerate(ident_perms): 5512 lines.append("data (isym(i,%d),i=1,nexternal)/%s/" % \ 5513 (iperm+1, ",".join([str(i+1) for i in perm]))) 5514 lines.append("data nsym/%d/" % len(ident_perms)) 5515 5516 # Write the file 5517 writer.writelines(lines) 5518 5519 return True
5520 5521 #=========================================================================== 5522 # write_symfact_file 5523 #===========================================================================
5524 - def write_symfact_file(self, writer, symmetry):
5525 """Write the files symfact.dat for MG4 by comparing diagrams using 5526 the internal matrix element value functionality.""" 5527 5528 pos = max(2, int(math.ceil(math.log10(len(symmetry))))) 5529 form = "%"+str(pos)+"r %"+str(pos+1)+"r" 5530 # Write out lines for symswap.inc file (used to permute the 5531 # external leg momenta 5532 lines = [ form %(i+1, s) for i,s in enumerate(symmetry) if s != 0] 5533 # Write the file 5534 writer.write('\n'.join(lines)) 5535 writer.write('\n') 5536 5537 return True
5538 5539 #=========================================================================== 5540 # write_symperms_file 5541 #===========================================================================
5542 - def write_symperms_file(self, writer, perms):
5543 """Write the symperms.inc file for subprocess group, used for 5544 symmetric configurations""" 5545 5546 lines = [] 5547 for iperm, perm in enumerate(perms): 5548 lines.append("data (perms(i,%d),i=1,nexternal)/%s/" % \ 5549 (iperm+1, ",".join([str(i+1) for i in perm]))) 5550 5551 # Write the file 5552 writer.writelines(lines) 5553 5554 return True
5555 5556 #=========================================================================== 5557 # write_subproc 5558 #===========================================================================
5559 - def write_subproc(self, writer, subprocdir):
5560 """Append this subprocess to the subproc.mg file for MG4""" 5561 5562 # Write line to file 5563 writer.write(subprocdir + "\n") 5564 5565 return True
5566
5567 #=============================================================================== 5568 # ProcessExporterFortranMEGroup 5569 #=============================================================================== 5570 -class ProcessExporterFortranMEGroup(ProcessExporterFortranME):
5571 """Class to take care of exporting a set of matrix elements to 5572 MadEvent subprocess group format.""" 5573 5574 matrix_file = "matrix_madevent_group_v4.inc" 5575 grouped_mode = 'madevent' 5576 default_opt = {'clean': False, 'complex_mass':False, 5577 'export_format':'madevent', 'mp': False, 5578 'v5_model': True, 5579 'output_options':{}, 5580 'hel_recycling': True 5581 } 5582 5583 5584 #=========================================================================== 5585 # generate_subprocess_directory 5586 #===========================================================================
5587 - def generate_subprocess_directory(self, subproc_group, 5588 fortran_model, 5589 group_number):
5590 """Generate the Pn directory for a subprocess group in MadEvent, 5591 including the necessary matrix_N.f files, configs.inc and various 5592 other helper files.""" 5593 5594 assert isinstance(subproc_group, group_subprocs.SubProcessGroup), \ 5595 "subproc_group object not SubProcessGroup" 5596 5597 if not self.model: 5598 self.model = subproc_group.get('matrix_elements')[0].\ 5599 get('processes')[0].get('model') 5600 5601 cwd = os.getcwd() 5602 path = pjoin(self.dir_path, 'SubProcesses') 5603 5604 os.chdir(path) 5605 pathdir = os.getcwd() 5606 5607 # Create the directory PN in the specified path 5608 subprocdir = "P%d_%s" % (subproc_group.get('number'), 5609 subproc_group.get('name')) 5610 try: 5611 os.mkdir(subprocdir) 5612 except os.error as error: 5613 logger.warning(error.strerror + " " + subprocdir) 5614 5615 try: 5616 os.chdir(subprocdir) 5617 except os.error: 5618 logger.error('Could not cd to directory %s' % subprocdir) 5619 return 0 5620 5621 logger.info('Creating files in directory %s' % subprocdir) 5622 5623 # Create the matrix.f files, auto_dsig.f files and all inc files 5624 # for all subprocesses in the group 5625 5626 maxamps = 0 5627 maxflows = 0 5628 tot_calls = 0 5629 5630 matrix_elements = subproc_group.get('matrix_elements') 5631 5632 # Add the driver.f, all grouped ME's must share the same number of 5633 # helicity configuration 5634 ncomb = matrix_elements[0].get_helicity_combinations() 5635 for me in matrix_elements[1:]: 5636 if ncomb!=me.get_helicity_combinations(): 5637 raise MadGraph5Error("All grouped processes must share the "+\ 5638 "same number of helicity configurations.") 5639 5640 filename = 'driver.f' 5641 self.write_driver(writers.FortranWriter(filename),ncomb, 5642 n_grouped_proc=len(matrix_elements), v5=self.opt['v5_model']) 5643 5644 try: 5645 self.proc_characteristic['hel_recycling'] = self.opt['hel_recycling'] 5646 except KeyError: 5647 self.proc_characteristic['hel_recycling'] = False 5648 self.opt['hel_recycling'] = False 5649 for ime, matrix_element in \ 5650 enumerate(matrix_elements): 5651 if self.opt['hel_recycling']: 5652 filename = 'matrix%d_orig.f' % (ime+1) 5653 replace_dict = self.write_matrix_element_v4(None, 5654 matrix_element, 5655 fortran_model, 5656 proc_id=str(ime+1), 5657 config_map=subproc_group.get('diagram_maps')[ime], 5658 subproc_number=group_number) 5659 calls,ncolor = replace_dict['return_value'] 5660 tfile = open(replace_dict['template_file']).read() 5661 file = tfile % replace_dict 5662 # Add the split orders helper functions. 5663 file = file + '\n' + open(replace_dict['template_file2'])\ 5664 .read()%replace_dict 5665 # Write the file 5666 writer = writers.FortranWriter(filename) 5667 writer.writelines(file) 5668 5669 # 5670 # write the dedicated template for helicity recycling 5671 # 5672 tfile = open(replace_dict['template_file'].replace('.inc',"_hel.inc")).read() 5673 file = tfile % replace_dict 5674 # Add the split orders helper functions. 5675 file = file + '\n' + open(replace_dict['template_file2'])\ 5676 .read()%replace_dict 5677 # Write the file 5678 writer = writers.FortranWriter('template_matrix%d.f' % (ime+1)) 5679 writer.uniformcase = False 5680 writer.writelines(file) 5681 5682 5683 5684 5685 else: 5686 filename = 'matrix%d.f' % (ime+1) 5687 calls, ncolor = \ 5688 self.write_matrix_element_v4(writers.FortranWriter(filename), 5689 matrix_element, 5690 fortran_model, 5691 proc_id=str(ime+1), 5692 config_map=subproc_group.get('diagram_maps')[ime], 5693 subproc_number=group_number) 5694 5695 5696 5697 filename = 'auto_dsig%d.f' % (ime+1) 5698 self.write_auto_dsig_file(writers.FortranWriter(filename), 5699 matrix_element, 5700 str(ime+1)) 5701 5702 # Keep track of needed quantities 5703 tot_calls += int(calls) 5704 maxflows = max(maxflows, ncolor) 5705 maxamps = max(maxamps, len(matrix_element.get('diagrams'))) 5706 5707 # Draw diagrams 5708 if not 'noeps' in self.opt['output_options'] or self.opt['output_options']['noeps'] != 'True': 5709 filename = "matrix%d.ps" % (ime+1) 5710 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\ 5711 get('diagrams'), 5712 filename, 5713 model = \ 5714 matrix_element.get('processes')[0].\ 5715 get('model'), 5716 amplitude=True) 5717 logger.info("Generating Feynman diagrams for " + \ 5718 matrix_element.get('processes')[0].nice_string()) 5719 plot.draw() 5720 5721 # Extract number of external particles 5722 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 5723 5724 # Generate a list of diagrams corresponding to each configuration 5725 # [[d1, d2, ...,dn],...] where 1,2,...,n is the subprocess number 5726 # If a subprocess has no diagrams for this config, the number is 0 5727 subproc_diagrams_for_config = subproc_group.get('diagrams_for_configs') 5728 5729 filename = 'auto_dsig.f' 5730 self.write_super_auto_dsig_file(writers.FortranWriter(filename), 5731 subproc_group) 5732 5733 filename = 'coloramps.inc' 5734 self.write_coloramps_file(writers.FortranWriter(filename), 5735 subproc_diagrams_for_config, 5736 maxflows, 5737 matrix_elements) 5738 5739 filename = 'get_color.f' 5740 self.write_colors_file(writers.FortranWriter(filename), 5741 matrix_elements) 5742 5743 filename = 'config_subproc_map.inc' 5744 self.write_config_subproc_map_file(writers.FortranWriter(filename), 5745 subproc_diagrams_for_config) 5746 5747 filename = 'configs.inc' 5748 nconfigs, (s_and_t_channels, nqcd_list) = self.write_configs_file(\ 5749 writers.FortranWriter(filename), 5750 subproc_group, 5751 subproc_diagrams_for_config) 5752 5753 filename = 'config_nqcd.inc' 5754 self.write_config_nqcd_file(writers.FortranWriter(filename), 5755 nqcd_list) 5756 5757 filename = 'decayBW.inc' 5758 self.write_decayBW_file(writers.FortranWriter(filename), 5759 s_and_t_channels) 5760 5761 filename = 'dname.mg' 5762 self.write_dname_file(writers.FortranWriter(filename), 5763 subprocdir) 5764 5765 filename = 'iproc.dat' 5766 self.write_iproc_file(writers.FortranWriter(filename), 5767 group_number) 5768 5769 filename = 'leshouche.inc' 5770 self.write_leshouche_file(writers.FortranWriter(filename), 5771 subproc_group) 5772 5773 filename = 'maxamps.inc' 5774 self.write_maxamps_file(writers.FortranWriter(filename), 5775 maxamps, 5776 maxflows, 5777 max([len(me.get('processes')) for me in \ 5778 matrix_elements]), 5779 len(matrix_elements)) 5780 5781 # Note that mg.sym is not relevant for this case 5782 filename = 'mg.sym' 5783 self.write_default_mg_sym_file(writers.FortranWriter(filename)) 5784 5785 filename = 'mirrorprocs.inc' 5786 self.write_mirrorprocs(writers.FortranWriter(filename), 5787 subproc_group) 5788 5789 filename = 'ncombs.inc' 5790 self.write_ncombs_file(writers.FortranWriter(filename), 5791 nexternal) 5792 5793 filename = 'nexternal.inc' 5794 self.write_nexternal_file(writers.FortranWriter(filename), 5795 nexternal, ninitial) 5796 5797 filename = 'ngraphs.inc' 5798 self.write_ngraphs_file(writers.FortranWriter(filename), 5799 nconfigs) 5800 5801 filename = 'pmass.inc' 5802 self.write_pmass_file(writers.FortranWriter(filename), 5803 matrix_element) 5804 5805 filename = 'props.inc' 5806 self.write_props_file(writers.FortranWriter(filename), 5807 matrix_element, 5808 s_and_t_channels) 5809 5810 filename = 'processes.dat' 5811 files.write_to_file(filename, 5812 self.write_processes_file, 5813 subproc_group) 5814 5815 # Find config symmetries and permutations 5816 symmetry, perms, ident_perms = \ 5817 diagram_symmetry.find_symmetry(subproc_group) 5818 5819 filename = 'symswap.inc' 5820 self.write_symswap_file(writers.FortranWriter(filename), 5821 ident_perms) 5822 5823 filename = 'symfact_orig.dat' 5824 self.write_symfact_file(open(filename, 'w'), symmetry) 5825 5826 # check consistency 5827 for i, sym_fact in enumerate(symmetry): 5828 5829 if sym_fact >= 0: 5830 continue 5831 if nqcd_list[i] != nqcd_list[abs(sym_fact)-1]: 5832 misc.sprint(i, sym_fact, nqcd_list[i], nqcd_list[abs(sym_fact)]) 5833 raise Exception("identical diagram with different QCD powwer") 5834 5835 5836 filename = 'symperms.inc' 5837 self.write_symperms_file(writers.FortranWriter(filename), 5838 perms) 5839 5840 # Generate jpgs -> pass in make_html 5841 #os.system(pjoin('..', '..', 'bin', 'gen_jpeg-pl')) 5842 5843 self.link_files_in_SubProcess(pjoin(pathdir,subprocdir)) 5844 5845 #import nexternal/leshouch in Source 5846 ln('nexternal.inc', '../../Source', log=False) 5847 ln('leshouche.inc', '../../Source', log=False) 5848 ln('maxamps.inc', '../../Source', log=False) 5849 5850 # Return to SubProcesses dir) 5851 os.chdir(pathdir) 5852 5853 # Add subprocess to subproc.mg 5854 filename = 'subproc.mg' 5855 files.append_to_file(filename, 5856 self.write_subproc, 5857 subprocdir) 5858 5859 # Return to original dir 5860 os.chdir(cwd) 5861 5862 if not tot_calls: 5863 tot_calls = 0 5864 return tot_calls
5865 5866 #=========================================================================== 5867 # write_super_auto_dsig_file 5868 #===========================================================================
5869 - def write_super_auto_dsig_file(self, writer, subproc_group):
5870 """Write the auto_dsig.f file selecting between the subprocesses 5871 in subprocess group mode""" 5872 5873 replace_dict = {} 5874 5875 # Extract version number and date from VERSION file 5876 info_lines = self.get_mg5_info_lines() 5877 replace_dict['info_lines'] = info_lines 5878 5879 matrix_elements = subproc_group.get('matrix_elements') 5880 5881 # Extract process info lines 5882 process_lines = '\n'.join([self.get_process_info_lines(me) for me in \ 5883 matrix_elements]) 5884 replace_dict['process_lines'] = process_lines 5885 5886 nexternal, ninitial = matrix_elements[0].get_nexternal_ninitial() 5887 replace_dict['nexternal'] = nexternal 5888 5889 replace_dict['nsprocs'] = 2*len(matrix_elements) 5890 5891 # Generate dsig definition line 5892 dsig_def_line = "DOUBLE PRECISION " + \ 5893 ",".join(["DSIG%d" % (iproc + 1) for iproc in \ 5894 range(len(matrix_elements))]) 5895 replace_dict["dsig_def_line"] = dsig_def_line 5896 5897 # Generate dsig process lines 5898 call_dsig_proc_lines = [] 5899 for iproc in range(len(matrix_elements)): 5900 call_dsig_proc_lines.append(\ 5901 "IF(IPROC.EQ.%(num)d) DSIGPROC=DSIG%(num)d(P1,WGT,IMODE) ! %(proc)s" % \ 5902 {"num": iproc + 1, 5903 "proc": matrix_elements[iproc].get('processes')[0].base_string()}) 5904 replace_dict['call_dsig_proc_lines'] = "\n".join(call_dsig_proc_lines) 5905 5906 ncomb=matrix_elements[0].get_helicity_combinations() 5907 replace_dict['read_write_good_hel'] = self.read_write_good_hel(ncomb) 5908 5909 s1,s2 = matrix_elements[0].get_spin_state_initial() 5910 replace_dict['nb_spin_state1'] = s1 5911 replace_dict['nb_spin_state2'] = s2 5912 5913 printzeroamp = [] 5914 for iproc in range(len(matrix_elements)): 5915 printzeroamp.append(\ 5916 " call print_zero_amp_%i()" % ( iproc + 1)) 5917 replace_dict['print_zero_amp'] = "\n".join(printzeroamp) 5918 5919 5920 if writer: 5921 file = open(pjoin(_file_path, \ 5922 'iolibs/template_files/super_auto_dsig_group_v4.inc')).read() 5923 file = file % replace_dict 5924 5925 # Write the file 5926 writer.writelines(file) 5927 else: 5928 return replace_dict
5929 5930 #=========================================================================== 5931 # write_mirrorprocs 5932 #===========================================================================
5933 - def write_mirrorprocs(self, writer, subproc_group):
5934 """Write the mirrorprocs.inc file determining which processes have 5935 IS mirror process in subprocess group mode.""" 5936 5937 lines = [] 5938 bool_dict = {True: '.true.', False: '.false.'} 5939 matrix_elements = subproc_group.get('matrix_elements') 5940 lines.append("DATA (MIRRORPROCS(I),I=1,%d)/%s/" % \ 5941 (len(matrix_elements), 5942 ",".join([bool_dict[me.get('has_mirror_process')] for \ 5943 me in matrix_elements]))) 5944 # Write the file 5945 writer.writelines(lines)
5946 5947 #=========================================================================== 5948 # write_addmothers 5949 #===========================================================================
5950 - def write_addmothers(self, writer):
5951 """Write the SubProcess/addmothers.f""" 5952 5953 path = pjoin(_file_path,'iolibs','template_files','addmothers.f') 5954 5955 text = open(path).read() % {'iconfig': 'lconfig'} 5956 writer.write(text) 5957 5958 return True
5959 5960 5961 #=========================================================================== 5962 # write_coloramps_file 5963 #===========================================================================
5964 - def write_coloramps_file(self, writer, diagrams_for_config, maxflows, 5965 matrix_elements):
5966 """Write the coloramps.inc file for MadEvent in Subprocess group mode""" 5967 5968 # Create a map from subprocess (matrix element) to a list of 5969 # the diagrams corresponding to each config 5970 5971 lines = [] 5972 5973 subproc_to_confdiag = {} 5974 for config in diagrams_for_config: 5975 for subproc, diag in enumerate(config): 5976 try: 5977 subproc_to_confdiag[subproc].append(diag) 5978 except KeyError: 5979 subproc_to_confdiag[subproc] = [diag] 5980 5981 for subproc in sorted(subproc_to_confdiag.keys()): 5982 lines.extend(self.get_icolamp_lines(subproc_to_confdiag[subproc], 5983 matrix_elements[subproc], 5984 subproc + 1)) 5985 5986 lines.insert(0, "logical icolamp(%d,%d,%d)" % \ 5987 (maxflows, 5988 len(diagrams_for_config), 5989 len(matrix_elements))) 5990 5991 # Write the file 5992 writer.writelines(lines) 5993 5994 return True
5995 5996 #=========================================================================== 5997 # write_config_subproc_map_file 5998 #===========================================================================
5999 - def write_config_subproc_map_file(self, writer, config_subproc_map):
6000 """Write the config_subproc_map.inc file for subprocess groups""" 6001 6002 lines = [] 6003 # Output only configs that have some corresponding diagrams 6004 iconfig = 0 6005 for config in config_subproc_map: 6006 if set(config) == set([0]): 6007 continue 6008 lines.append("DATA (CONFSUB(i,%d),i=1,%d)/%s/" % \ 6009 (iconfig + 1, len(config), 6010 ",".join([str(i) for i in config]))) 6011 iconfig += 1 6012 # Write the file 6013 writer.writelines(lines) 6014 6015 return True
6016 6017 #=========================================================================== 6018 # read_write_good_hel 6019 #===========================================================================
6020 - def read_write_good_hel(self, ncomb):
6021 """return the code to read/write the good_hel common_block""" 6022 6023 convert = {'ncomb' : ncomb} 6024 6025 output = """ 6026 subroutine write_good_hel(stream_id) 6027 implicit none 6028 integer stream_id 6029 INTEGER NCOMB 6030 PARAMETER ( NCOMB=%(ncomb)d) 6031 LOGICAL GOODHEL(NCOMB, 2) 6032 INTEGER NTRY(2) 6033 common/BLOCK_GOODHEL/NTRY,GOODHEL 6034 write(stream_id,*) GOODHEL 6035 return 6036 end 6037 6038 6039 subroutine read_good_hel(stream_id) 6040 implicit none 6041 include 'genps.inc' 6042 integer stream_id 6043 INTEGER NCOMB 6044 PARAMETER ( NCOMB=%(ncomb)d) 6045 LOGICAL GOODHEL(NCOMB, 2) 6046 INTEGER NTRY(2) 6047 common/BLOCK_GOODHEL/NTRY,GOODHEL 6048 read(stream_id,*) GOODHEL 6049 NTRY(1) = MAXTRIES + 1 6050 NTRY(2) = MAXTRIES + 1 6051 return 6052 end 6053 6054 subroutine init_good_hel() 6055 implicit none 6056 INTEGER NCOMB 6057 PARAMETER ( NCOMB=%(ncomb)d) 6058 LOGICAL GOODHEL(NCOMB, 2) 6059 INTEGER NTRY(2) 6060 INTEGER I 6061 6062 do i=1,NCOMB 6063 GOODHEL(I,1) = .false. 6064 GOODHEL(I,2) = .false. 6065 enddo 6066 NTRY(1) = 0 6067 NTRY(2) = 0 6068 end 6069 6070 integer function get_maxsproc() 6071 implicit none 6072 include 'maxamps.inc' 6073 6074 get_maxsproc = maxsproc 6075 return 6076 end 6077 6078 """ % convert 6079 6080 return output
6081 6082 6083 6084 #=========================================================================== 6085 # write_configs_file 6086 #===========================================================================
6087 - def write_configs_file(self, writer, subproc_group, diagrams_for_config):
6088 """Write the configs.inc file with topology information for a 6089 subprocess group. Use the first subprocess with a diagram for each 6090 configuration.""" 6091 6092 matrix_elements = subproc_group.get('matrix_elements') 6093 model = matrix_elements[0].get('processes')[0].get('model') 6094 6095 diagrams = [] 6096 config_numbers = [] 6097 for iconfig, config in enumerate(diagrams_for_config): 6098 # Check if any diagrams correspond to this config 6099 if set(config) == set([0]): 6100 continue 6101 subproc_diags = [] 6102 for s,d in enumerate(config): 6103 if d: 6104 subproc_diags.append(matrix_elements[s].\ 6105 get('diagrams')[d-1]) 6106 else: 6107 subproc_diags.append(None) 6108 diagrams.append(subproc_diags) 6109 config_numbers.append(iconfig + 1) 6110 6111 # Extract number of external particles 6112 (nexternal, ninitial) = subproc_group.get_nexternal_ninitial() 6113 6114 return len(diagrams), \ 6115 self.write_configs_file_from_diagrams(writer, diagrams, 6116 config_numbers, 6117 nexternal, ninitial, 6118 model)
6119 6120 #=========================================================================== 6121 # write_run_configs_file 6122 #===========================================================================
6123 - def write_run_config_file(self, writer):
6124 """Write the run_configs.inc file for MadEvent""" 6125 6126 path = pjoin(_file_path,'iolibs','template_files','madevent_run_config.inc') 6127 if self.proc_characteristic['loop_induced']: 6128 job_per_chan = 1 6129 else: 6130 job_per_chan = 2 6131 text = open(path).read() % {'chanperjob':job_per_chan} 6132 writer.write(text) 6133 return True
6134 6135 6136 #=========================================================================== 6137 # write_leshouche_file 6138 #===========================================================================
6139 - def write_leshouche_file(self, writer, subproc_group):
6140 """Write the leshouche.inc file for MG4""" 6141 6142 all_lines = [] 6143 6144 for iproc, matrix_element in \ 6145 enumerate(subproc_group.get('matrix_elements')): 6146 all_lines.extend(self.get_leshouche_lines(matrix_element, 6147 iproc)) 6148 # Write the file 6149 writer.writelines(all_lines) 6150 return True
6151 6152
6153 - def finalize(self,*args, **opts):
6154 6155 super(ProcessExporterFortranMEGroup, self).finalize(*args, **opts) 6156 #ensure that the grouping information is on the correct value 6157 self.proc_characteristic['grouped_matrix'] = True
6158 6159 6160 #=============================================================================== 6161 # UFO_model_to_mg4 6162 #=============================================================================== 6163 6164 python_to_fortran = lambda x: parsers.UFOExpressionParserFortran().parse(x)
6165 6166 -class UFO_model_to_mg4(object):
6167 """ A converter of the UFO-MG5 Model to the MG4 format """ 6168 6169 # The list below shows the only variables the user is allowed to change by 6170 # himself for each PS point. If he changes any other, then calling 6171 # UPDATE_AS_PARAM() (or equivalently MP_UPDATE_AS_PARAM()) will not 6172 # correctly account for the change. 6173 PS_dependent_key = ['aS','MU_R'] 6174 mp_complex_format = 'complex*32' 6175 mp_real_format = 'real*16' 6176 # Warning, it is crucial none of the couplings/parameters of the model 6177 # starts with this prefix. I should add a check for this. 6178 # You can change it as the global variable to check_param_card.ParamCard 6179 mp_prefix = check_param_card.ParamCard.mp_prefix 6180
6181 - def __init__(self, model, output_path, opt=None):
6182 """ initialization of the objects """ 6183 6184 self.model = model 6185 self.model_name = model['name'] 6186 self.dir_path = output_path 6187 6188 self.opt = {'complex_mass': False, 'export_format': 'madevent', 'mp':True, 6189 'loop_induced': False} 6190 if opt: 6191 self.opt.update(opt) 6192 6193 self.coups_dep = [] # (name, expression, type) 6194 self.coups_indep = [] # (name, expression, type) 6195 self.params_dep = [] # (name, expression, type) 6196 self.params_indep = [] # (name, expression, type) 6197 self.params_ext = [] # external parameter 6198 self.p_to_f = parsers.UFOExpressionParserFortran(self.model) 6199 self.mp_p_to_f = parsers.UFOExpressionParserMPFortran(self.model)
6200 6201
6203 """modify the parameter if some of them are identical up to the case""" 6204 6205 lower_dict={} 6206 duplicate = set() 6207 keys = list(self.model['parameters'].keys()) 6208 keys.sort() 6209 for key in keys: 6210 for param in self.model['parameters'][key]: 6211 lower_name = param.name.lower() 6212 if not lower_name: 6213 continue 6214 try: 6215 lower_dict[lower_name].append(param) 6216 except KeyError as error: 6217 lower_dict[lower_name] = [param] 6218 else: 6219 duplicate.add(lower_name) 6220 logger.debug('%s is define both as lower case and upper case.' 6221 % lower_name) 6222 if not duplicate: 6223 return 6224 6225 re_expr = r'''\b(%s)\b''' 6226 to_change = [] 6227 change={} 6228 for value in duplicate: 6229 for i, var in enumerate(lower_dict[value]): 6230 to_change.append(var.name) 6231 new_name = '%s%s' % (var.name.lower(), 6232 ('__%d'%(i+1) if i>0 else '')) 6233 change[var.name] = new_name 6234 var.name = new_name 6235 6236 # Apply the modification to the map_CTcoup_CTparam of the model 6237 # if it has one (giving for each coupling the CT parameters whcih 6238 # are necessary and which should be exported to the model. 6239 if hasattr(self.model,'map_CTcoup_CTparam'): 6240 for coup, ctparams in self.model.map_CTcoup_CTparam: 6241 for i, ctparam in enumerate(ctparams): 6242 try: 6243 self.model.map_CTcoup_CTparam[coup][i] = change[ctparam] 6244 except KeyError: 6245 pass 6246 6247 replace = lambda match_pattern: change[match_pattern.groups()[0]] 6248 rep_pattern = re.compile(re_expr % '|'.join(to_change)) 6249 6250 # change parameters 6251 for key in keys: 6252 if key == ('external',): 6253 continue 6254 for param in self.model['parameters'][key]: 6255 param.expr = rep_pattern.sub(replace, param.expr) 6256 6257 # change couplings 6258 for key in self.model['couplings'].keys(): 6259 for coup in self.model['couplings'][key]: 6260 coup.expr = rep_pattern.sub(replace, coup.expr) 6261 6262 # change mass/width 6263 for part in self.model['particles']: 6264 if str(part.get('mass')) in to_change: 6265 part.set('mass', rep_pattern.sub(replace, str(part.get('mass')))) 6266 if str(part.get('width')) in to_change: 6267 part.set('width', rep_pattern.sub(replace, str(part.get('width'))))
6268
6269 - def refactorize(self, wanted_couplings = []):
6270 """modify the couplings to fit with MG4 convention """ 6271 6272 # Keep only separation in alphaS 6273 keys = list(self.model['parameters'].keys()) 6274 keys.sort(key=len) 6275 for key in keys: 6276 to_add = [o for o in self.model['parameters'][key] if o.name] 6277 6278 if key == ('external',): 6279 self.params_ext += to_add 6280 elif any([(k in key) for k in self.PS_dependent_key]): 6281 self.params_dep += to_add 6282 else: 6283 self.params_indep += to_add 6284 # same for couplings 6285 keys = list(self.model['couplings'].keys()) 6286 keys.sort(key=len) 6287 for key, coup_list in self.model['couplings'].items(): 6288 if any([(k in key) for k in self.PS_dependent_key]): 6289 self.coups_dep += [c for c in coup_list if 6290 (not wanted_couplings or c.name in \ 6291 wanted_couplings)] 6292 else: 6293 self.coups_indep += [c for c in coup_list if 6294 (not wanted_couplings or c.name in \ 6295 wanted_couplings)] 6296 6297 # MG4 use G and not aS as it basic object for alphas related computation 6298 #Pass G in the independant list 6299 if 'G' in self.params_dep: 6300 index = self.params_dep.index('G') 6301 G = self.params_dep.pop(index) 6302 # G.expr = '2*cmath.sqrt(as*pi)' 6303 # self.params_indep.insert(0, self.params_dep.pop(index)) 6304 # No need to add it if not defined 6305 6306 if 'aS' not in self.params_ext: 6307 logger.critical('aS not define as external parameter adding it!') 6308 #self.model['parameters']['aS'] = base_objects.ParamCardVariable('aS', 0.138,'DUMMY',(1,)) 6309 self.params_indep.append( base_objects. ModelVariable('aS', '0.138','real')) 6310 self.params_indep.append( base_objects. ModelVariable('G', '4.1643','real'))
6311 - def build(self, wanted_couplings = [], full=True):
6312 """modify the couplings to fit with MG4 convention and creates all the 6313 different files""" 6314 6315 self.pass_parameter_to_case_insensitive() 6316 self.refactorize(wanted_couplings) 6317 6318 # write the files 6319 if full: 6320 if wanted_couplings: 6321 # extract the wanted ct parameters 6322 self.extract_needed_CTparam(wanted_couplings=wanted_couplings) 6323 self.write_all()
6324 6325
6326 - def open(self, name, comment='c', format='default'):
6327 """ Open the file name in the correct directory and with a valid 6328 header.""" 6329 6330 file_path = pjoin(self.dir_path, name) 6331 6332 if format == 'fortran': 6333 fsock = writers.FortranWriter(file_path, 'w') 6334 write_class = io.FileIO 6335 6336 write_class.writelines(fsock, comment * 77 + '\n') 6337 write_class.writelines(fsock, '%(comment)s written by the UFO converter\n' % \ 6338 {'comment': comment + (6 - len(comment)) * ' '}) 6339 write_class.writelines(fsock, comment * 77 + '\n\n') 6340 else: 6341 fsock = open(file_path, 'w') 6342 fsock.writelines(comment * 77 + '\n') 6343 fsock.writelines('%(comment)s written by the UFO converter\n' % \ 6344 {'comment': comment + (6 - len(comment)) * ' '}) 6345 fsock.writelines(comment * 77 + '\n\n') 6346 return fsock
6347 6348
6349 - def write_all(self):
6350 """ write all the files """ 6351 #write the part related to the external parameter 6352 self.create_ident_card() 6353 self.create_param_read() 6354 6355 #write the definition of the parameter 6356 self.create_input() 6357 self.create_intparam_def(dp=True,mp=False) 6358 if self.opt['mp']: 6359 self.create_intparam_def(dp=False,mp=True) 6360 6361 # definition of the coupling. 6362 self.create_actualize_mp_ext_param_inc() 6363 self.create_coupl_inc() 6364 self.create_write_couplings() 6365 self.create_couplings() 6366 6367 # the makefile 6368 self.create_makeinc() 6369 self.create_param_write() 6370 6371 # The model functions 6372 self.create_model_functions_inc() 6373 self.create_model_functions_def() 6374 6375 # The param_card.dat 6376 self.create_param_card() 6377 6378 6379 # All the standard files 6380 self.copy_standard_file()
6381 6382 ############################################################################ 6383 ## ROUTINE CREATING THE FILES ############################################ 6384 ############################################################################ 6385
6386 - def copy_standard_file(self):
6387 """Copy the standard files for the fortran model.""" 6388 6389 #copy the library files 6390 file_to_link = ['formats.inc','printout.f', \ 6391 'rw_para.f', 'testprog.f'] 6392 6393 for filename in file_to_link: 6394 cp( MG5DIR + '/models/template_files/fortran/' + filename, \ 6395 self.dir_path) 6396 6397 file = open(os.path.join(MG5DIR,\ 6398 'models/template_files/fortran/rw_para.f')).read() 6399 6400 includes=["include \'coupl.inc\'","include \'input.inc\'", 6401 "include \'model_functions.inc\'"] 6402 if self.opt['mp']: 6403 includes.extend(["include \'mp_coupl.inc\'","include \'mp_input.inc\'"]) 6404 # In standalone and madloop we do no use the compiled param card but 6405 # still parse the .dat one so we must load it. 6406 if self.opt['loop_induced']: 6407 #loop induced follow MadEvent way to handle the card. 6408 load_card = '' 6409 lha_read_filename='lha_read.f' 6410 updateloop_default = '.true.' 6411 elif self.opt['export_format'] in ['madloop','madloop_optimized', 'madloop_matchbox']: 6412 load_card = 'call LHA_loadcard(param_name,npara,param,value)' 6413 lha_read_filename='lha_read_mp.f' 6414 updateloop_default = '.true.' 6415 elif self.opt['export_format'].startswith('standalone') \ 6416 or self.opt['export_format'] in ['madweight', 'plugin']\ 6417 or self.opt['export_format'].startswith('matchbox'): 6418 load_card = 'call LHA_loadcard(param_name,npara,param,value)' 6419 lha_read_filename='lha_read.f' 6420 updateloop_default = '.true.' 6421 else: 6422 load_card = '' 6423 lha_read_filename='lha_read.f' 6424 updateloop_default = '.false.' 6425 6426 cp( MG5DIR + '/models/template_files/fortran/' + lha_read_filename, \ 6427 os.path.join(self.dir_path,'lha_read.f')) 6428 6429 file=file%{'includes':'\n '.join(includes), 6430 'load_card':load_card, 6431 'updateloop_default': updateloop_default} 6432 writer=open(os.path.join(self.dir_path,'rw_para.f'),'w') 6433 writer.writelines(file) 6434 writer.close() 6435 6436 if self.opt['export_format'] in ['madevent', 'FKS5_default', 'FKS5_optimized'] \ 6437 or self.opt['loop_induced']: 6438 cp( MG5DIR + '/models/template_files/fortran/makefile_madevent', 6439 self.dir_path + '/makefile') 6440 if self.opt['export_format'] in ['FKS5_default', 'FKS5_optimized']: 6441 path = pjoin(self.dir_path, 'makefile') 6442 text = open(path).read() 6443 text = text.replace('madevent','aMCatNLO') 6444 open(path, 'w').writelines(text) 6445 elif self.opt['export_format'] in ['standalone', 'standalone_msP','standalone_msF', 6446 'madloop','madloop_optimized', 'standalone_rw', 6447 'madweight','matchbox','madloop_matchbox', 'plugin']: 6448 cp( MG5DIR + '/models/template_files/fortran/makefile_standalone', 6449 self.dir_path + '/makefile') 6450 #elif self.opt['export_format'] in []: 6451 #pass 6452 else: 6453 raise MadGraph5Error('Unknown format')
6454
6455 - def create_coupl_inc(self):
6456 """ write coupling.inc """ 6457 6458 fsock = self.open('coupl.inc', format='fortran') 6459 if self.opt['mp']: 6460 mp_fsock = self.open('mp_coupl.inc', format='fortran') 6461 mp_fsock_same_name = self.open('mp_coupl_same_name.inc',\ 6462 format='fortran') 6463 6464 # Write header 6465 header = """double precision G 6466 common/strong/ G 6467 6468 double complex gal(2) 6469 common/weak/ gal 6470 6471 double precision MU_R 6472 common/rscale/ MU_R 6473 6474 """ 6475 # Nf is the number of light quark flavours 6476 header = header+"""double precision Nf 6477 parameter(Nf=%dd0) 6478 """ % self.model.get_nflav() 6479 #Nl is the number of massless leptons 6480 header = header+"""double precision Nl 6481 parameter(Nl=%dd0) 6482 """ % self.model.get_nleps() 6483 6484 fsock.writelines(header) 6485 6486 if self.opt['mp']: 6487 header = """%(real_mp_format)s %(mp_prefix)sG 6488 common/MP_strong/ %(mp_prefix)sG 6489 6490 %(complex_mp_format)s %(mp_prefix)sgal(2) 6491 common/MP_weak/ %(mp_prefix)sgal 6492 6493 %(complex_mp_format)s %(mp_prefix)sMU_R 6494 common/MP_rscale/ %(mp_prefix)sMU_R 6495 6496 """ 6497 6498 6499 6500 6501 mp_fsock.writelines(header%{'real_mp_format':self.mp_real_format, 6502 'complex_mp_format':self.mp_complex_format, 6503 'mp_prefix':self.mp_prefix}) 6504 mp_fsock_same_name.writelines(header%{'real_mp_format':self.mp_real_format, 6505 'complex_mp_format':self.mp_complex_format, 6506 'mp_prefix':''}) 6507 6508 # Write the Mass definition/ common block 6509 masses = set() 6510 widths = set() 6511 if self.opt['complex_mass']: 6512 complex_mass = set() 6513 6514 for particle in self.model.get('particles'): 6515 #find masses 6516 one_mass = particle.get('mass') 6517 if one_mass.lower() != 'zero': 6518 masses.add(one_mass) 6519 6520 # find width 6521 one_width = particle.get('width') 6522 if one_width.lower() != 'zero': 6523 widths.add(one_width) 6524 if self.opt['complex_mass'] and one_mass.lower() != 'zero': 6525 complex_mass.add('CMASS_%s' % one_mass) 6526 6527 if masses: 6528 fsock.writelines('double precision '+','.join(masses)+'\n') 6529 fsock.writelines('common/masses/ '+','.join(masses)+'\n\n') 6530 if self.opt['mp']: 6531 mp_fsock_same_name.writelines(self.mp_real_format+' '+\ 6532 ','.join(masses)+'\n') 6533 mp_fsock_same_name.writelines('common/MP_masses/ '+\ 6534 ','.join(masses)+'\n\n') 6535 mp_fsock.writelines(self.mp_real_format+' '+','.join([\ 6536 self.mp_prefix+m for m in masses])+'\n') 6537 mp_fsock.writelines('common/MP_masses/ '+\ 6538 ','.join([self.mp_prefix+m for m in masses])+'\n\n') 6539 6540 if widths: 6541 fsock.writelines('double precision '+','.join(widths)+'\n') 6542 fsock.writelines('common/widths/ '+','.join(widths)+'\n\n') 6543 if self.opt['mp']: 6544 mp_fsock_same_name.writelines(self.mp_real_format+' '+\ 6545 ','.join(widths)+'\n') 6546 mp_fsock_same_name.writelines('common/MP_widths/ '+\ 6547 ','.join(widths)+'\n\n') 6548 mp_fsock.writelines(self.mp_real_format+' '+','.join([\ 6549 self.mp_prefix+w for w in widths])+'\n') 6550 mp_fsock.writelines('common/MP_widths/ '+\ 6551 ','.join([self.mp_prefix+w for w in widths])+'\n\n') 6552 6553 # Write the Couplings 6554 coupling_list = [coupl.name for coupl in self.coups_dep + self.coups_indep] 6555 fsock.writelines('double complex '+', '.join(coupling_list)+'\n') 6556 fsock.writelines('common/couplings/ '+', '.join(coupling_list)+'\n') 6557 if self.opt['mp']: 6558 mp_fsock_same_name.writelines(self.mp_complex_format+' '+\ 6559 ','.join(coupling_list)+'\n') 6560 mp_fsock_same_name.writelines('common/MP_couplings/ '+\ 6561 ','.join(coupling_list)+'\n\n') 6562 mp_fsock.writelines(self.mp_complex_format+' '+','.join([\ 6563 self.mp_prefix+c for c in coupling_list])+'\n') 6564 mp_fsock.writelines('common/MP_couplings/ '+\ 6565 ','.join([self.mp_prefix+c for c in coupling_list])+'\n\n') 6566 6567 # Write complex mass for complex mass scheme (if activated) 6568 if self.opt['complex_mass'] and complex_mass: 6569 fsock.writelines('double complex '+', '.join(complex_mass)+'\n') 6570 fsock.writelines('common/complex_mass/ '+', '.join(complex_mass)+'\n') 6571 if self.opt['mp']: 6572 mp_fsock_same_name.writelines(self.mp_complex_format+' '+\ 6573 ','.join(complex_mass)+'\n') 6574 mp_fsock_same_name.writelines('common/MP_complex_mass/ '+\ 6575 ','.join(complex_mass)+'\n\n') 6576 mp_fsock.writelines(self.mp_complex_format+' '+','.join([\ 6577 self.mp_prefix+cm for cm in complex_mass])+'\n') 6578 mp_fsock.writelines('common/MP_complex_mass/ '+\ 6579 ','.join([self.mp_prefix+cm for cm in complex_mass])+'\n\n')
6580
6581 - def create_write_couplings(self):
6582 """ write the file coupl_write.inc """ 6583 6584 fsock = self.open('coupl_write.inc', format='fortran') 6585 6586 fsock.writelines("""write(*,*) ' Couplings of %s' 6587 write(*,*) ' ---------------------------------' 6588 write(*,*) ' '""" % self.model_name) 6589 def format(coupl): 6590 return 'write(*,2) \'%(name)s = \', %(name)s' % {'name': coupl.name}
6591 6592 # Write the Couplings 6593 lines = [format(coupl) for coupl in self.coups_dep + self.coups_indep] 6594 fsock.writelines('\n'.join(lines)) 6595 6596
6597 - def create_input(self):
6598 """create input.inc containing the definition of the parameters""" 6599 6600 fsock = self.open('input.inc', format='fortran') 6601 if self.opt['mp']: 6602 mp_fsock = self.open('mp_input.inc', format='fortran') 6603 6604 #find mass/ width since they are already define 6605 already_def = set() 6606 for particle in self.model.get('particles'): 6607 already_def.add(particle.get('mass').lower()) 6608 already_def.add(particle.get('width').lower()) 6609 if self.opt['complex_mass']: 6610 already_def.add('cmass_%s' % particle.get('mass').lower()) 6611 6612 is_valid = lambda name: name.lower() not in ['g', 'mu_r', 'zero'] and \ 6613 name.lower() not in already_def 6614 6615 real_parameters = [param.name for param in self.params_dep + 6616 self.params_indep if param.type == 'real' 6617 and is_valid(param.name)] 6618 6619 real_parameters += [param.name for param in self.params_ext 6620 if param.type == 'real'and 6621 is_valid(param.name)] 6622 6623 # check the parameter is a CT parameter or not 6624 # if yes, just use the needed ones 6625 real_parameters = [param for param in real_parameters \ 6626 if self.check_needed_param(param)] 6627 6628 fsock.writelines('double precision '+','.join(real_parameters)+'\n') 6629 fsock.writelines('common/params_R/ '+','.join(real_parameters)+'\n\n') 6630 if self.opt['mp']: 6631 mp_fsock.writelines(self.mp_real_format+' '+','.join([\ 6632 self.mp_prefix+p for p in real_parameters])+'\n') 6633 mp_fsock.writelines('common/MP_params_R/ '+','.join([\ 6634 self.mp_prefix+p for p in real_parameters])+'\n\n') 6635 6636 complex_parameters = [param.name for param in self.params_dep + 6637 self.params_indep if param.type == 'complex' and 6638 is_valid(param.name)] 6639 6640 # check the parameter is a CT parameter or not 6641 # if yes, just use the needed ones 6642 complex_parameters = [param for param in complex_parameters \ 6643 if self.check_needed_param(param)] 6644 6645 if complex_parameters: 6646 fsock.writelines('double complex '+','.join(complex_parameters)+'\n') 6647 fsock.writelines('common/params_C/ '+','.join(complex_parameters)+'\n\n') 6648 if self.opt['mp']: 6649 mp_fsock.writelines(self.mp_complex_format+' '+','.join([\ 6650 self.mp_prefix+p for p in complex_parameters])+'\n') 6651 mp_fsock.writelines('common/MP_params_C/ '+','.join([\ 6652 self.mp_prefix+p for p in complex_parameters])+'\n\n')
6653
6654 - def check_needed_param(self, param):
6655 """ Returns whether the parameter in argument is needed for this 6656 specific computation or not.""" 6657 6658 # If this is a leading order model or if there was no CT parameter 6659 # employed in this NLO model, one can directly return that the 6660 # parameter is needed since only CTParameters are filtered. 6661 if not hasattr(self, 'allCTparameters') or \ 6662 self.allCTparameters is None or self.usedCTparameters is None or \ 6663 len(self.allCTparameters)==0: 6664 return True 6665 6666 # We must allow the conjugate shorthand for the complex parameter as 6667 # well so we check wether either the parameter name or its name with 6668 # 'conjg__' substituted with '' is present in the list. 6669 # This is acceptable even if some parameter had an original name 6670 # including 'conjg__' in it, because at worst we export a parameter 6671 # was not needed. 6672 param = param.lower() 6673 cjg_param = param.replace('conjg__','',1) 6674 6675 # First make sure it is a CTparameter 6676 if param not in self.allCTparameters and \ 6677 cjg_param not in self.allCTparameters: 6678 if hasattr(self.model, "notused_ct_params"): 6679 if param.endswith(('_fin_','_1eps_','_2eps_')): 6680 limit = -2 6681 elif param.endswith(('_1eps','_2eps')): 6682 limit =-1 6683 else: 6684 limit = 0 6685 base = '_'.join(param.split('_')[1:limit]) 6686 if base in self.model.notused_ct_params: 6687 return False 6688 return True 6689 6690 # Now check if it is in the list of CTparameters actually used 6691 return (param in self.usedCTparameters or \ 6692 cjg_param in self.usedCTparameters)
6693
6694 - def extract_needed_CTparam(self,wanted_couplings=[]):
6695 """ Extract what are the needed CT parameters given the wanted_couplings""" 6696 6697 if not hasattr(self.model,'map_CTcoup_CTparam') or not wanted_couplings: 6698 # Setting these lists to none wil disable the filtering in 6699 # check_needed_param 6700 self.allCTparameters = None 6701 self.usedCTparameters = None 6702 return 6703 6704 # All CTparameters appearin in all CT couplings 6705 allCTparameters=list(self.model.map_CTcoup_CTparam.values()) 6706 # Define in this class the list of all CT parameters 6707 self.allCTparameters=list(\ 6708 set(itertools.chain.from_iterable(allCTparameters))) 6709 6710 # All used CT couplings 6711 w_coupls = [coupl.lower() for coupl in wanted_couplings] 6712 allUsedCTCouplings = [coupl for coupl in 6713 self.model.map_CTcoup_CTparam.keys() if coupl.lower() in w_coupls] 6714 6715 # Now define the list of all CT parameters that are actually used 6716 self.usedCTparameters=list(\ 6717 set(itertools.chain.from_iterable([ 6718 self.model.map_CTcoup_CTparam[coupl] for coupl in allUsedCTCouplings 6719 ]))) 6720 6721 # Now at last, make these list case insensitive 6722 self.allCTparameters = [ct.lower() for ct in self.allCTparameters] 6723 self.usedCTparameters = [ct.lower() for ct in self.usedCTparameters]
6724
6725 - def create_intparam_def(self, dp=True, mp=False):
6726 """ create intparam_definition.inc setting the internal parameters. 6727 Output the double precision and/or the multiple precision parameters 6728 depending on the parameters dp and mp. If mp only, then the file names 6729 get the 'mp_' prefix. 6730 """ 6731 6732 fsock = self.open('%sintparam_definition.inc'% 6733 ('mp_' if mp and not dp else ''), format='fortran') 6734 6735 fsock.write_comments(\ 6736 "Parameters that should not be recomputed event by event.\n") 6737 fsock.writelines("if(readlha) then\n") 6738 if dp: 6739 fsock.writelines("G = 2 * DSQRT(AS*PI) ! for the first init\n") 6740 if mp: 6741 fsock.writelines("MP__G = 2 * SQRT(MP__AS*MP__PI) ! for the first init\n") 6742 6743 for param in self.params_indep: 6744 if param.name == 'ZERO': 6745 continue 6746 # check whether the parameter is a CT parameter 6747 # if yes,just used the needed ones 6748 if not self.check_needed_param(param.name): 6749 continue 6750 if dp: 6751 fsock.writelines("%s = %s\n" % (param.name, 6752 self.p_to_f.parse(param.expr))) 6753 if mp: 6754 fsock.writelines("%s%s = %s\n" % (self.mp_prefix,param.name, 6755 self.mp_p_to_f.parse(param.expr))) 6756 6757 fsock.writelines('endif') 6758 6759 fsock.write_comments('\nParameters that should be recomputed at an event by even basis.\n') 6760 if dp: 6761 fsock.writelines("aS = G**2/4/pi\n") 6762 if mp: 6763 fsock.writelines("MP__aS = MP__G**2/4/MP__PI\n") 6764 6765 # these are the parameters needed for the loops 6766 if hasattr(self, 'allCTparameters') and self.allCTparameters: 6767 ct_params = [param for param in self.params_dep \ 6768 if self.check_needed_param(param.name) and \ 6769 param.name.lower() in self.allCTparameters] 6770 else: 6771 ct_params = [] 6772 6773 for param in self.params_dep: 6774 # skip the CT parameters, which have already been done before 6775 if not self.check_needed_param(param.name) or param in ct_params: 6776 continue 6777 if dp: 6778 fsock.writelines("%s = %s\n" % (param.name, 6779 self.p_to_f.parse(param.expr))) 6780 elif mp: 6781 fsock.writelines("%s%s = %s\n" % (self.mp_prefix,param.name, 6782 self.mp_p_to_f.parse(param.expr))) 6783 6784 fsock.write_comments('\nParameters that should be updated for the loops.\n') 6785 6786 # do not skip the evaluation of these parameters in MP 6787 if not mp and ct_params: fsock.writelines('if (updateloop) then') 6788 for param in ct_params: 6789 if dp: 6790 fsock.writelines("%s = %s\n" % (param.name, 6791 self.p_to_f.parse(param.expr))) 6792 elif mp: 6793 fsock.writelines("%s%s = %s\n" % (self.mp_prefix,param.name, 6794 self.mp_p_to_f.parse(param.expr))) 6795 6796 if not mp and ct_params: fsock.writelines('endif') 6797 6798 fsock.write_comments("\nDefinition of the EW coupling used in the write out of aqed\n") 6799 6800 # Let us not necessarily investigate the presence of alpha_EW^-1 of Gf as an external parameter, but also just as a parameter 6801 if ('aEWM1',) in self.model['parameters'] or \ 6802 any( ('aEWM1'.lower() in [p.name.lower() for p in p_list]) for p_list in self.model['parameters'].values() ): 6803 if dp: 6804 fsock.writelines(""" gal(1) = 3.5449077018110318d0 / DSQRT(ABS(aEWM1)) 6805 gal(2) = 1d0 6806 """) 6807 elif mp: 6808 fsock.writelines(""" %(mp_prefix)sgal(1) = 2 * SQRT(MP__PI/ABS(MP__aEWM1)) 6809 %(mp_prefix)sgal(2) = 1d0 6810 """ %{'mp_prefix':self.mp_prefix}) 6811 pass 6812 # in Gmu scheme, aEWM1 is not external but Gf is an exteranl variable 6813 elif ('Gf',) in self.model['parameters']: 6814 # Make sure to consider complex masses if the complex mass scheme is activated 6815 if self.opt['complex_mass']: 6816 mass_prefix = 'CMASS_MDL_' 6817 else: 6818 mass_prefix = 'MDL_' 6819 6820 if dp: 6821 if self.opt['complex_mass']: 6822 fsock.writelines(""" gal(1) = ABS(2.378414230005442133435d0*%(mass_prefix)sMW*SQRT(DCMPLX(1.0D0,0.0d0)-%(mass_prefix)sMW**2/%(mass_prefix)sMZ**2)*DSQRT(MDL_Gf)) 6823 gal(2) = 1d0 6824 """%{'mass_prefix':mass_prefix}) 6825 else: 6826 fsock.writelines(""" gal(1) = 2.378414230005442133435d0*%(mass_prefix)sMW*DSQRT(1D0-%(mass_prefix)sMW**2/%(mass_prefix)sMZ**2)*DSQRT(MDL_Gf) 6827 gal(2) = 1d0 6828 """%{'mass_prefix':mass_prefix}) 6829 elif mp: 6830 if self.opt['complex_mass']: 6831 fsock.writelines(""" %(mp_prefix)sgal(1) = ABS(2*%(mp_prefix)s%(mass_prefix)sMW*SQRT(CMPLX(1e0_16,0.0e0_16,KIND=16)-%(mp_prefix)s%(mass_prefix)sMW**2/%(mp_prefix)s%(mass_prefix)sMZ**2)*SQRT(SQRT(2e0_16)*%(mp_prefix)sMDL_Gf)) 6832 %(mp_prefix)sgal(2) = 1e0_16 6833 """ %{'mp_prefix':self.mp_prefix,'mass_prefix':mass_prefix}) 6834 else: 6835 fsock.writelines(""" %(mp_prefix)sgal(1) = 2*%(mp_prefix)s%(mass_prefix)sMW*SQRT(1e0_16-%(mp_prefix)s%(mass_prefix)sMW**2/%(mp_prefix)s%(mass_prefix)sMZ**2)*SQRT(SQRT(2e0_16)*%(mp_prefix)sMDL_Gf) 6836 %(mp_prefix)sgal(2) = 1e0_16 6837 """ %{'mp_prefix':self.mp_prefix,'mass_prefix':mass_prefix}) 6838 6839 pass 6840 else: 6841 if dp: 6842 logger.warning('$RED aEWM1 and Gf not define in MODEL. AQED will not be written correcty in LHE FILE') 6843 fsock.writelines(""" gal(1) = 1d0 6844 gal(2) = 1d0 6845 """) 6846 elif mp: 6847 fsock.writelines(""" %(mp_prefix)sgal(1) = 1e0_16 6848 %(mp_prefix)sgal(2) = 1e0_16 6849 """%{'mp_prefix':self.mp_prefix})
6850 6851
6852 - def create_couplings(self):
6853 """ create couplings.f and all couplingsX.f """ 6854 6855 nb_def_by_file = 25 6856 6857 self.create_couplings_main(nb_def_by_file) 6858 nb_coup_indep = 1 + len(self.coups_indep) // nb_def_by_file 6859 nb_coup_dep = 1 + len(self.coups_dep) // nb_def_by_file 6860 6861 for i in range(nb_coup_indep): 6862 # For the independent couplings, we compute the double and multiple 6863 # precision ones together 6864 data = self.coups_indep[nb_def_by_file * i: 6865 min(len(self.coups_indep), nb_def_by_file * (i+1))] 6866 self.create_couplings_part(i + 1, data, dp=True, mp=self.opt['mp']) 6867 6868 for i in range(nb_coup_dep): 6869 # For the dependent couplings, we compute the double and multiple 6870 # precision ones in separate subroutines. 6871 data = self.coups_dep[nb_def_by_file * i: 6872 min(len(self.coups_dep), nb_def_by_file * (i+1))] 6873 self.create_couplings_part( i + 1 + nb_coup_indep , data, 6874 dp=True,mp=False) 6875 if self.opt['mp']: 6876 self.create_couplings_part( i + 1 + nb_coup_indep , data, 6877 dp=False,mp=True)
6878 6879
6880 - def create_couplings_main(self, nb_def_by_file=25):
6881 """ create couplings.f """ 6882 6883 fsock = self.open('couplings.f', format='fortran') 6884 6885 fsock.writelines("""subroutine coup() 6886 6887 implicit none 6888 double precision PI, ZERO 6889 logical READLHA 6890 parameter (PI=3.141592653589793d0) 6891 parameter (ZERO=0d0) 6892 include \'model_functions.inc\'""") 6893 if self.opt['mp']: 6894 fsock.writelines("""%s MP__PI, MP__ZERO 6895 parameter (MP__PI=3.1415926535897932384626433832795e0_16) 6896 parameter (MP__ZERO=0e0_16) 6897 include \'mp_input.inc\' 6898 include \'mp_coupl.inc\' 6899 """%self.mp_real_format) 6900 fsock.writelines("""logical updateloop 6901 common /to_updateloop/updateloop 6902 include \'input.inc\' 6903 include \'coupl.inc\' 6904 READLHA = .true. 6905 include \'intparam_definition.inc\'""") 6906 if self.opt['mp']: 6907 fsock.writelines("""include \'mp_intparam_definition.inc\'\n""") 6908 6909 nb_coup_indep = 1 + len(self.coups_indep) // nb_def_by_file 6910 nb_coup_dep = 1 + len(self.coups_dep) // nb_def_by_file 6911 6912 fsock.writelines('\n'.join(\ 6913 ['call coup%s()' % (i + 1) for i in range(nb_coup_indep)])) 6914 6915 fsock.write_comments('\ncouplings needed to be evaluated points by points\n') 6916 6917 fsock.writelines('\n'.join(\ 6918 ['call coup%s()' % (nb_coup_indep + i + 1) \ 6919 for i in range(nb_coup_dep)])) 6920 if self.opt['mp']: 6921 fsock.writelines('\n'.join(\ 6922 ['call mp_coup%s()' % (nb_coup_indep + i + 1) \ 6923 for i in range(nb_coup_dep)])) 6924 fsock.writelines('''\n return \n end\n''') 6925 6926 fsock.writelines("""subroutine update_as_param() 6927 6928 implicit none 6929 double precision PI, ZERO 6930 logical READLHA 6931 parameter (PI=3.141592653589793d0) 6932 parameter (ZERO=0d0) 6933 logical updateloop 6934 common /to_updateloop/updateloop 6935 include \'model_functions.inc\'""") 6936 fsock.writelines("""include \'input.inc\' 6937 include \'coupl.inc\' 6938 READLHA = .false.""") 6939 fsock.writelines(""" 6940 include \'intparam_definition.inc\'\n 6941 """) 6942 6943 nb_coup_indep = 1 + len(self.coups_indep) // nb_def_by_file 6944 nb_coup_dep = 1 + len(self.coups_dep) // nb_def_by_file 6945 6946 fsock.write_comments('\ncouplings needed to be evaluated points by points\n') 6947 6948 fsock.writelines('\n'.join(\ 6949 ['call coup%s()' % (nb_coup_indep + i + 1) \ 6950 for i in range(nb_coup_dep)])) 6951 fsock.writelines('''\n return \n end\n''') 6952 6953 fsock.writelines("""subroutine update_as_param2(mu_r2,as2) 6954 6955 implicit none 6956 double precision PI 6957 parameter (PI=3.141592653589793d0) 6958 double precision mu_r2, as2 6959 include \'model_functions.inc\'""") 6960 fsock.writelines("""include \'input.inc\' 6961 include \'coupl.inc\'""") 6962 fsock.writelines(""" 6963 if (mu_r2.gt.0d0) MU_R = mu_r2 6964 G = SQRT(4.0d0*PI*AS2) 6965 AS = as2 6966 6967 CALL UPDATE_AS_PARAM() 6968 """) 6969 fsock.writelines('''\n return \n end\n''') 6970 6971 if self.opt['mp']: 6972 fsock.writelines("""subroutine mp_update_as_param() 6973 6974 implicit none 6975 logical READLHA 6976 include \'model_functions.inc\'""") 6977 fsock.writelines("""%s MP__PI, MP__ZERO 6978 parameter (MP__PI=3.1415926535897932384626433832795e0_16) 6979 parameter (MP__ZERO=0e0_16) 6980 include \'mp_input.inc\' 6981 include \'mp_coupl.inc\' 6982 """%self.mp_real_format) 6983 fsock.writelines("""include \'input.inc\' 6984 include \'coupl.inc\' 6985 include \'actualize_mp_ext_params.inc\' 6986 READLHA = .false. 6987 include \'mp_intparam_definition.inc\'\n 6988 """) 6989 6990 nb_coup_indep = 1 + len(self.coups_indep) // nb_def_by_file 6991 nb_coup_dep = 1 + len(self.coups_dep) // nb_def_by_file 6992 6993 fsock.write_comments('\ncouplings needed to be evaluated points by points\n') 6994 6995 fsock.writelines('\n'.join(\ 6996 ['call mp_coup%s()' % (nb_coup_indep + i + 1) \ 6997 for i in range(nb_coup_dep)])) 6998 fsock.writelines('''\n return \n end\n''')
6999
7000 - def create_couplings_part(self, nb_file, data, dp=True, mp=False):
7001 """ create couplings[nb_file].f containing information coming from data. 7002 Outputs the computation of the double precision and/or the multiple 7003 precision couplings depending on the parameters dp and mp. 7004 If mp is True and dp is False, then the prefix 'MP_' is appended to the 7005 filename and subroutine name. 7006 """ 7007 7008 fsock = self.open('%scouplings%s.f' %('mp_' if mp and not dp else '', 7009 nb_file), format='fortran') 7010 fsock.writelines("""subroutine %scoup%s() 7011 7012 implicit none 7013 include \'model_functions.inc\'"""%('mp_' if mp and not dp else '',nb_file)) 7014 if dp: 7015 fsock.writelines(""" 7016 double precision PI, ZERO 7017 parameter (PI=3.141592653589793d0) 7018 parameter (ZERO=0d0) 7019 include 'input.inc' 7020 include 'coupl.inc'""") 7021 if mp: 7022 fsock.writelines("""%s MP__PI, MP__ZERO 7023 parameter (MP__PI=3.1415926535897932384626433832795e0_16) 7024 parameter (MP__ZERO=0e0_16) 7025 include \'mp_input.inc\' 7026 include \'mp_coupl.inc\' 7027 """%self.mp_real_format) 7028 7029 for coupling in data: 7030 if dp: 7031 fsock.writelines('%s = %s' % (coupling.name, 7032 self.p_to_f.parse(coupling.expr))) 7033 if mp: 7034 fsock.writelines('%s%s = %s' % (self.mp_prefix,coupling.name, 7035 self.mp_p_to_f.parse(coupling.expr))) 7036 fsock.writelines('end')
7037
7038 - def create_model_functions_inc(self):
7039 """ Create model_functions.inc which contains the various declarations 7040 of auxiliary functions which might be used in the couplings expressions 7041 """ 7042 7043 additional_fct = [] 7044 # check for functions define in the UFO model 7045 ufo_fct = self.model.get('functions') 7046 if ufo_fct: 7047 for fct in ufo_fct: 7048 # already handle by default 7049 if str(fct.name) not in ["complexconjugate", "re", "im", "sec", 7050 "csc", "asec", "acsc", "theta_function", "cond", 7051 "condif", "reglogp", "reglogm", "reglog", "recms", "arg", "cot", 7052 "grreglog","regsqrt","B0F","sqrt_trajectory", 7053 "log_trajectory"]: 7054 additional_fct.append(fct.name) 7055 7056 fsock = self.open('model_functions.inc', format='fortran') 7057 fsock.writelines("""double complex cond 7058 double complex condif 7059 double complex reglog 7060 double complex reglogp 7061 double complex reglogm 7062 double complex regsqrt 7063 double complex grreglog 7064 double complex recms 7065 double complex arg 7066 double complex B0F 7067 double complex sqrt_trajectory 7068 double complex log_trajectory 7069 %s 7070 """ % "\n".join([" double complex %s" % i for i in additional_fct])) 7071 7072 7073 if self.opt['mp']: 7074 fsock.writelines("""%(complex_mp_format)s mp_cond 7075 %(complex_mp_format)s mp_condif 7076 %(complex_mp_format)s mp_reglog 7077 %(complex_mp_format)s mp_reglogp 7078 %(complex_mp_format)s mp_reglogm 7079 %(complex_mp_format)s mp_regsqrt 7080 %(complex_mp_format)s mp_grreglog 7081 %(complex_mp_format)s mp_recms 7082 %(complex_mp_format)s mp_arg 7083 %(complex_mp_format)s mp_B0F 7084 %(complex_mp_format)s mp_sqrt_trajectory 7085 %(complex_mp_format)s mp_log_trajectory 7086 %(additional)s 7087 """ %\ 7088 {"additional": "\n".join([" %s mp_%s" % (self.mp_complex_format, i) for i in additional_fct]), 7089 'complex_mp_format':self.mp_complex_format 7090 })
7091
7092 - def create_model_functions_def(self):
7093 """ Create model_functions.f which contains the various definitions 7094 of auxiliary functions which might be used in the couplings expressions 7095 Add the functions.f functions for formfactors support 7096 """ 7097 7098 fsock = self.open('model_functions.f', format='fortran') 7099 fsock.writelines("""double complex function cond(condition,truecase,falsecase) 7100 implicit none 7101 double complex condition,truecase,falsecase 7102 if(condition.eq.(0.0d0,0.0d0)) then 7103 cond=truecase 7104 else 7105 cond=falsecase 7106 endif 7107 end 7108 7109 double complex function condif(condition,truecase,falsecase) 7110 implicit none 7111 logical condition 7112 double complex truecase,falsecase 7113 if(condition) then 7114 condif=truecase 7115 else 7116 condif=falsecase 7117 endif 7118 end 7119 7120 double complex function recms(condition,expr) 7121 implicit none 7122 logical condition 7123 double complex expr 7124 if(condition)then 7125 recms=expr 7126 else 7127 recms=dcmplx(dble(expr)) 7128 endif 7129 end 7130 7131 double complex function reglog(arg_in) 7132 implicit none 7133 double complex TWOPII 7134 parameter (TWOPII=2.0d0*3.1415926535897932d0*(0.0d0,1.0d0)) 7135 double complex arg_in 7136 double complex arg 7137 arg=arg_in 7138 if(dabs(dimag(arg)).eq.0.0d0)then 7139 arg=dcmplx(dble(arg),0.0d0) 7140 endif 7141 if(dabs(dble(arg)).eq.0.0d0)then 7142 arg=dcmplx(0.0d0,dimag(arg)) 7143 endif 7144 if(arg.eq.(0.0d0,0.0d0)) then 7145 reglog=(0.0d0,0.0d0) 7146 else 7147 reglog=log(arg) 7148 endif 7149 end 7150 7151 double complex function reglogp(arg_in) 7152 implicit none 7153 double complex TWOPII 7154 parameter (TWOPII=2.0d0*3.1415926535897932d0*(0.0d0,1.0d0)) 7155 double complex arg_in 7156 double complex arg 7157 arg=arg_in 7158 if(dabs(dimag(arg)).eq.0.0d0)then 7159 arg=dcmplx(dble(arg),0.0d0) 7160 endif 7161 if(dabs(dble(arg)).eq.0.0d0)then 7162 arg=dcmplx(0.0d0,dimag(arg)) 7163 endif 7164 if(arg.eq.(0.0d0,0.0d0))then 7165 reglogp=(0.0d0,0.0d0) 7166 else 7167 if(dble(arg).lt.0.0d0.and.dimag(arg).lt.0.0d0)then 7168 reglogp=log(arg) + TWOPII 7169 else 7170 reglogp=log(arg) 7171 endif 7172 endif 7173 end 7174 7175 double complex function reglogm(arg_in) 7176 implicit none 7177 double complex TWOPII 7178 parameter (TWOPII=2.0d0*3.1415926535897932d0*(0.0d0,1.0d0)) 7179 double complex arg_in 7180 double complex arg 7181 arg=arg_in 7182 if(dabs(dimag(arg)).eq.0.0d0)then 7183 arg=dcmplx(dble(arg),0.0d0) 7184 endif 7185 if(dabs(dble(arg)).eq.0.0d0)then 7186 arg=dcmplx(0.0d0,dimag(arg)) 7187 endif 7188 if(arg.eq.(0.0d0,0.0d0))then 7189 reglogm=(0.0d0,0.0d0) 7190 else 7191 if(dble(arg).lt.0.0d0.and.dimag(arg).gt.0.0d0)then 7192 reglogm=log(arg) - TWOPII 7193 else 7194 reglogm=log(arg) 7195 endif 7196 endif 7197 end 7198 7199 double complex function regsqrt(arg_in) 7200 implicit none 7201 double complex arg_in 7202 double complex arg 7203 arg=arg_in 7204 if(dabs(dimag(arg)).eq.0.0d0)then 7205 arg=dcmplx(dble(arg),0.0d0) 7206 endif 7207 if(dabs(dble(arg)).eq.0.0d0)then 7208 arg=dcmplx(0.0d0,dimag(arg)) 7209 endif 7210 regsqrt=sqrt(arg) 7211 end 7212 7213 double complex function grreglog(logsw,expr1_in,expr2_in) 7214 implicit none 7215 double complex TWOPII 7216 parameter (TWOPII=2.0d0*3.1415926535897932d0*(0.0d0,1.0d0)) 7217 double complex expr1_in,expr2_in 7218 double complex expr1,expr2 7219 double precision logsw 7220 double precision imagexpr 7221 logical firstsheet 7222 expr1=expr1_in 7223 expr2=expr2_in 7224 if(dabs(dimag(expr1)).eq.0.0d0)then 7225 expr1=dcmplx(dble(expr1),0.0d0) 7226 endif 7227 if(dabs(dble(expr1)).eq.0.0d0)then 7228 expr1=dcmplx(0.0d0,dimag(expr1)) 7229 endif 7230 if(dabs(dimag(expr2)).eq.0.0d0)then 7231 expr2=dcmplx(dble(expr2),0.0d0) 7232 endif 7233 if(dabs(dble(expr2)).eq.0.0d0)then 7234 expr2=dcmplx(0.0d0,dimag(expr2)) 7235 endif 7236 if(expr1.eq.(0.0d0,0.0d0))then 7237 grreglog=(0.0d0,0.0d0) 7238 else 7239 imagexpr=dimag(expr1)*dimag(expr2) 7240 firstsheet=imagexpr.ge.0.0d0 7241 firstsheet=firstsheet.or.dble(expr1).ge.0.0d0 7242 firstsheet=firstsheet.or.dble(expr2).ge.0.0d0 7243 if(firstsheet)then 7244 grreglog=log(expr1) 7245 else 7246 if(dimag(expr1).gt.0.0d0)then 7247 grreglog=log(expr1) - logsw*TWOPII 7248 else 7249 grreglog=log(expr1) + logsw*TWOPII 7250 endif 7251 endif 7252 endif 7253 end 7254 7255 module b0f_caching 7256 7257 type b0f_node 7258 double complex p2,m12,m22 7259 double complex value 7260 type(b0f_node),pointer::parent 7261 type(b0f_node),pointer::left 7262 type(b0f_node),pointer::right 7263 end type b0f_node 7264 7265 contains 7266 7267 subroutine b0f_search(item, head, find) 7268 implicit none 7269 type(b0f_node),pointer,intent(inout)::head,item 7270 logical,intent(out)::find 7271 type(b0f_node),pointer::item1 7272 integer::icomp 7273 find=.false. 7274 nullify(item%parent) 7275 nullify(item%left) 7276 nullify(item%right) 7277 if(.not.associated(head))then 7278 head => item 7279 return 7280 endif 7281 item1 => head 7282 do 7283 icomp=b0f_node_compare(item,item1) 7284 if(icomp.lt.0)then 7285 if(.not.associated(item1%left))then 7286 item1%left => item 7287 item%parent => item1 7288 exit 7289 else 7290 item1 => item1%left 7291 endif 7292 elseif(icomp.gt.0)then 7293 if(.not.associated(item1%right))then 7294 item1%right => item 7295 item%parent => item1 7296 exit 7297 else 7298 item1 => item1%right 7299 endif 7300 else 7301 find=.true. 7302 item%value=item1%value 7303 exit 7304 endif 7305 enddo 7306 return 7307 end 7308 7309 integer function b0f_node_compare(item1,item2) result(res) 7310 implicit none 7311 type(b0f_node),pointer,intent(in)::item1,item2 7312 res=complex_compare(item1%p2,item2%p2) 7313 if(res.ne.0)return 7314 res=complex_compare(item1%m22,item2%m22) 7315 if(res.ne.0)return 7316 res=complex_compare(item1%m12,item2%m12) 7317 return 7318 end 7319 7320 integer function real_compare(r1,r2) result(res) 7321 implicit none 7322 double precision r1,r2 7323 double precision maxr,diff 7324 double precision tiny 7325 parameter (tiny=-1d-14) 7326 maxr=max(abs(r1),abs(r2)) 7327 diff=r1-r2 7328 if(maxr.le.1d-99.or.abs(diff)/max(maxr,1d-99).le.abs(tiny))then 7329 res=0 7330 return 7331 endif 7332 if(diff.gt.0d0)then 7333 res=1 7334 return 7335 else 7336 res=-1 7337 return 7338 endif 7339 end 7340 7341 integer function complex_compare(c1,c2) result(res) 7342 implicit none 7343 double complex c1,c2 7344 double precision r1,r2 7345 r1=dble(c1) 7346 r2=dble(c2) 7347 res=real_compare(r1,r2) 7348 if(res.ne.0)return 7349 r1=dimag(c1) 7350 r2=dimag(c2) 7351 res=real_compare(r1,r2) 7352 return 7353 end 7354 7355 end module b0f_caching 7356 7357 double complex function B0F(p2,m12,m22) 7358 use b0f_caching 7359 implicit none 7360 double complex p2,m12,m22 7361 double complex zero,TWOPII 7362 parameter (zero=(0.0d0,0.0d0)) 7363 parameter (TWOPII=2.0d0*3.1415926535897932d0*(0.0d0,1.0d0)) 7364 double precision M,M2,Ga,Ga2 7365 double precision tiny 7366 parameter (tiny=-1d-14) 7367 double complex logterms 7368 double complex log_trajectory 7369 logical use_caching 7370 parameter (use_caching=.true.) 7371 type(b0f_node),pointer::item 7372 type(b0f_node),pointer,save::b0f_bt 7373 integer init 7374 save init 7375 data init /0/ 7376 logical find 7377 IF(m12.eq.zero)THEN 7378 c it is a special case 7379 c refer to Eq.(5.48) in arXiv:1804.10017 7380 M=DBLE(p2) ! M^2 7381 M2=DBLE(m22) ! M2^2 7382 IF(M.LT.tiny.OR.M2.LT.tiny)THEN 7383 WRITE(*,*)'ERROR:B0F is not well defined when M^2,M2^2<0' 7384 STOP 7385 ENDIF 7386 M=DSQRT(DABS(M)) 7387 M2=DSQRT(DABS(M2)) 7388 IF(M.EQ.0d0)THEN 7389 Ga=0d0 7390 ELSE 7391 Ga=-DIMAG(p2)/M 7392 ENDIF 7393 IF(M2.EQ.0d0)THEN 7394 Ga2=0d0 7395 ELSE 7396 Ga2=-DIMAG(m22)/M2 7397 ENDIF 7398 IF(p2.ne.m22.and.p2.ne.zero.and.m22.ne.zero)THEN 7399 b0f=(m22-p2)/p2*LOG((m22-p2)/m22) 7400 IF(M.GT.M2.and.Ga*M2.GT.Ga2*M)THEN 7401 b0f=b0f-TWOPII 7402 ENDIF 7403 RETURN 7404 ELSE 7405 WRITE(*,*)'ERROR:B0F is not supported for a simple form' 7406 STOP 7407 ENDIF 7408 ENDIF 7409 c the general case 7410 c trajectory method as advocated in arXiv:1804.10017 (Eq.(E.47)) 7411 if(use_caching)then 7412 if(init.eq.0)then 7413 nullify(b0f_bt) 7414 init=1 7415 endif 7416 allocate(item) 7417 item%p2=p2 7418 item%m12=m12 7419 item%m22=m22 7420 find=.false. 7421 call b0f_search(item,b0f_bt,find) 7422 if(find)then 7423 b0f=item%value 7424 deallocate(item) 7425 return 7426 else 7427 logterms=log_trajectory(100,p2,m12,m22) 7428 b0f=-LOG(p2/m22)+logterms 7429 item%value=b0f 7430 return 7431 endif 7432 else 7433 logterms=log_trajectory(100,p2,m12,m22) 7434 b0f=-LOG(p2/m22)+logterms 7435 endif 7436 RETURN 7437 end 7438 7439 double complex function sqrt_trajectory(n_seg,p2,m12,m22) 7440 c only needed when p2*m12*m22=\=0 7441 implicit none 7442 integer n_seg ! number of segments 7443 double complex p2,m12,m22 7444 double complex zero,one 7445 parameter (zero=(0.0d0,0.0d0),one=(1.0d0,0.0d0)) 7446 double complex gamma0,gamma1 7447 double precision M,Ga,dGa,Ga_start 7448 double precision Gai,intersection 7449 double complex argim1,argi,p2i 7450 double complex gamma0i,gamma1i 7451 double precision tiny 7452 parameter (tiny=-1d-24) 7453 integer i 7454 double precision prefactor 7455 IF(ABS(p2*m12*m22).EQ.0d0)THEN 7456 WRITE(*,*)'ERROR:sqrt_trajectory works when p2*m12*m22/=0' 7457 STOP 7458 ENDIF 7459 M=DBLE(p2) ! M^2 7460 M=DSQRT(DABS(M)) 7461 IF(M.EQ.0d0)THEN 7462 Ga=0d0 7463 ELSE 7464 Ga=-DIMAG(p2)/M 7465 ENDIF 7466 c Eq.(5.37) in arXiv:1804.10017 7467 gamma0=one+m12/p2-m22/p2 7468 gamma1=m12/p2-dcmplx(0d0,1d0)*ABS(tiny)/p2 7469 IF(ABS(Ga).EQ.0d0)THEN 7470 sqrt_trajectory=SQRT(gamma0**2-4d0*gamma1) 7471 RETURN 7472 ENDIF 7473 c segments from -DABS(tiny*Ga) to Ga 7474 Ga_start=-DABS(tiny*Ga) 7475 dGa=(Ga-Ga_start)/n_seg 7476 prefactor=1d0 7477 Gai=Ga_start 7478 p2i=dcmplx(M**2,-Gai*M) 7479 gamma0i=one+m12/p2i-m22/p2i 7480 gamma1i=m12/p2i-dcmplx(0d0,1d0)*ABS(tiny)/p2i 7481 argim1=gamma0i**2-4d0*gamma1i 7482 DO i=1,n_seg 7483 Gai=dGa*i+Ga_start 7484 p2i=dcmplx(M**2,-Gai*M) 7485 gamma0i=one+m12/p2i-m22/p2i 7486 gamma1i=m12/p2i-dcmplx(0d0,1d0)*ABS(tiny)/p2i 7487 argi=gamma0i**2-4d0*gamma1i 7488 IF(DIMAG(argi)*DIMAG(argim1).LT.0d0)THEN 7489 intersection=DIMAG(argim1)*(DBLE(argi)-DBLE(argim1)) 7490 intersection=intersection/(DIMAG(argi)-DIMAG(argim1)) 7491 intersection=intersection-DBLE(argim1) 7492 IF(intersection.GT.0d0)THEN 7493 prefactor=-prefactor 7494 ENDIF 7495 ENDIF 7496 argim1=argi 7497 ENDDO 7498 sqrt_trajectory=SQRT(gamma0**2-4d0*gamma1)*prefactor 7499 RETURN 7500 end 7501 7502 double complex function log_trajectory(n_seg,p2,m12,m22) 7503 c sum of log terms appearing in Eq.(5.35) of arXiv:1804.10017 7504 c only needed when p2*m12*m22=\=0 7505 implicit none 7506 c 4 possible logarithms appearing in Eq.(5.35) of arXiv:1804.10017 7507 c log(arg(i)) with arg(i) for i=1 to 4 7508 c i=1: (ga_{+}-1) 7509 c i=2: (ga_{-}-1) 7510 c i=3: (ga_{+}-1)/ga_{+} 7511 c i=4: (ga_{-}-1)/ga_{-} 7512 integer n_seg ! number of segments 7513 double complex p2,m12,m22 7514 double complex zero,one,half,TWOPII 7515 parameter (zero=(0.0d0,0.0d0),one=(1.0d0,0.0d0)) 7516 parameter (half=(0.5d0,0.0d0)) 7517 parameter (TWOPII=2.0d0*3.1415926535897932d0*(0.0d0,1.0d0)) 7518 double complex gamma0,gammap,gammam,sqrtterm 7519 double precision M,Ga,dGa,Ga_start 7520 double precision Gai,intersection 7521 double complex argim1(4),argi(4),p2i,sqrttermi 7522 double complex gamma0i,gammapi,gammami 7523 double precision tiny 7524 parameter (tiny=-1d-14) 7525 integer i,j 7526 double complex addfactor(4) 7527 double complex sqrt_trajectory 7528 IF(ABS(p2*m12*m22).EQ.0d0)THEN 7529 WRITE(*,*)'ERROR:log_trajectory works when p2*m12*m22/=0' 7530 STOP 7531 ENDIF 7532 M=DBLE(p2) ! M^2 7533 M=DSQRT(DABS(M)) 7534 IF(M.EQ.0d0)THEN 7535 Ga=0d0 7536 ELSE 7537 Ga=-DIMAG(p2)/M 7538 ENDIF 7539 c Eq.(5.36-5.38) in arXiv:1804.10017 7540 sqrtterm=sqrt_trajectory(n_seg,p2,m12,m22) 7541 gamma0=one+m12/p2-m22/p2 7542 gammap=half*(gamma0+sqrtterm) 7543 gammam=half*(gamma0-sqrtterm) 7544 IF(ABS(Ga).EQ.0d0)THEN 7545 log_trajectory=-LOG(gammap-one)-LOG(gammam-one)+gammap*LOG((gammap-one)/gammap)+gammam*LOG((gammam-one)/gammam) 7546 RETURN 7547 ENDIF 7548 c segments from -DABS(tiny*Ga) to Ga 7549 Ga_start=-DABS(tiny*Ga) 7550 dGa=(Ga-Ga_start)/n_seg 7551 addfactor(1:4)=zero 7552 Gai=Ga_start 7553 p2i=dcmplx(M**2,-Gai*M) 7554 sqrttermi=sqrt_trajectory(n_seg,p2i,m12,m22) 7555 gamma0i=one+m12/p2i-m22/p2i 7556 gammapi=half*(gamma0i+sqrttermi) 7557 gammami=half*(gamma0i-sqrttermi) 7558 argim1(1)=gammapi-one 7559 argim1(2)=gammami-one 7560 argim1(3)=(gammapi-one)/gammapi 7561 argim1(4)=(gammami-one)/gammami 7562 DO i=1,n_seg 7563 Gai=dGa*i+Ga_start 7564 p2i=dcmplx(M**2,-Gai*M) 7565 sqrttermi=sqrt_trajectory(n_seg,p2i,m12,m22) 7566 gamma0i=one+m12/p2i-m22/p2i 7567 gammapi=half*(gamma0i+sqrttermi) 7568 gammami=half*(gamma0i-sqrttermi) 7569 argi(1)=gammapi-one 7570 argi(2)=gammami-one 7571 argi(3)=(gammapi-one)/gammapi 7572 argi(4)=(gammami-one)/gammami 7573 DO j=1,4 7574 IF(DIMAG(argi(j))*DIMAG(argim1(j)).LT.0d0)THEN 7575 intersection=DIMAG(argim1(j))*(DBLE(argi(j))-DBLE(argim1(j))) 7576 intersection=intersection/(DIMAG(argi(j))-DIMAG(argim1(j))) 7577 intersection=intersection-DBLE(argim1(j)) 7578 IF(intersection.GT.0d0)THEN 7579 IF(DIMAG(argim1(j)).LT.0)THEN 7580 addfactor(j)=addfactor(j)-TWOPII 7581 ELSE 7582 addfactor(j)=addfactor(j)+TWOPII 7583 ENDIF 7584 ENDIF 7585 ENDIF 7586 argim1(j)=argi(j) 7587 ENDDO 7588 ENDDO 7589 log_trajectory=-(LOG(gammap-one)+addfactor(1))-(LOG(gammam-one)+addfactor(2)) 7590 log_trajectory=log_trajectory+gammap*(LOG((gammap-one)/gammap)+addfactor(3)) 7591 log_trajectory=log_trajectory+gammam*(LOG((gammam-one)/gammam)+addfactor(4)) 7592 RETURN 7593 end 7594 7595 double complex function arg(comnum) 7596 implicit none 7597 double complex comnum 7598 double complex iim 7599 iim = (0.0d0,1.0d0) 7600 if(comnum.eq.(0.0d0,0.0d0)) then 7601 arg=(0.0d0,0.0d0) 7602 else 7603 arg=log(comnum/abs(comnum))/iim 7604 endif 7605 end""") 7606 if self.opt['mp']: 7607 fsock.writelines(""" 7608 7609 %(complex_mp_format)s function mp_cond(condition,truecase,falsecase) 7610 implicit none 7611 %(complex_mp_format)s condition,truecase,falsecase 7612 if(condition.eq.(0.0e0_16,0.0e0_16)) then 7613 mp_cond=truecase 7614 else 7615 mp_cond=falsecase 7616 endif 7617 end 7618 7619 %(complex_mp_format)s function mp_condif(condition,truecase,falsecase) 7620 implicit none 7621 logical condition 7622 %(complex_mp_format)s truecase,falsecase 7623 if(condition) then 7624 mp_condif=truecase 7625 else 7626 mp_condif=falsecase 7627 endif 7628 end 7629 7630 %(complex_mp_format)s function mp_recms(condition,expr) 7631 implicit none 7632 logical condition 7633 %(complex_mp_format)s expr 7634 if(condition)then 7635 mp_recms=expr 7636 else 7637 mp_recms=cmplx(real(expr),kind=16) 7638 endif 7639 end 7640 7641 7642 %(complex_mp_format)s function mp_reglog(arg_in) 7643 implicit none 7644 %(complex_mp_format)s TWOPII 7645 parameter (TWOPII=2.0e0_16*3.14169258478796109557151794433593750e0_16*(0.0e0_16,1.0e0_16)) 7646 %(complex_mp_format)s arg_in 7647 %(complex_mp_format)s arg 7648 arg=arg_in 7649 if(abs(imagpart(arg)).eq.0.0e0_16)then 7650 arg=cmplx(real(arg,kind=16),0.0e0_16) 7651 endif 7652 if(abs(real(arg,kind=16)).eq.0.0e0_16)then 7653 arg=cmplx(0.0e0_16,imagpart(arg)) 7654 endif 7655 if(arg.eq.(0.0e0_16,0.0e0_16)) then 7656 mp_reglog=(0.0e0_16,0.0e0_16) 7657 else 7658 mp_reglog=log(arg) 7659 endif 7660 end 7661 7662 %(complex_mp_format)s function mp_reglogp(arg_in) 7663 implicit none 7664 %(complex_mp_format)s TWOPII 7665 parameter (TWOPII=2.0e0_16*3.14169258478796109557151794433593750e0_16*(0.0e0_16,1.0e0_16)) 7666 %(complex_mp_format)s arg_in 7667 %(complex_mp_format)s arg 7668 arg=arg_in 7669 if(abs(imagpart(arg)).eq.0.0e0_16)then 7670 arg=cmplx(real(arg,kind=16),0.0e0_16) 7671 endif 7672 if(abs(real(arg,kind=16)).eq.0.0e0_16)then 7673 arg=cmplx(0.0e0_16,imagpart(arg)) 7674 endif 7675 if(arg.eq.(0.0e0_16,0.0e0_16))then 7676 mp_reglogp=(0.0e0_16,0.0e0_16) 7677 else 7678 if(real(arg,kind=16).lt.0.0e0_16.and.imagpart(arg).lt.0.0e0_16)then 7679 mp_reglogp=log(arg) + TWOPII 7680 else 7681 mp_reglogp=log(arg) 7682 endif 7683 endif 7684 end 7685 7686 %(complex_mp_format)s function mp_reglogm(arg_in) 7687 implicit none 7688 %(complex_mp_format)s TWOPII 7689 parameter (TWOPII=2.0e0_16*3.14169258478796109557151794433593750e0_16*(0.0e0_16,1.0e0_16)) 7690 %(complex_mp_format)s arg_in 7691 %(complex_mp_format)s arg 7692 arg=arg_in 7693 if(abs(imagpart(arg)).eq.0.0e0_16)then 7694 arg=cmplx(real(arg,kind=16),0.0e0_16) 7695 endif 7696 if(abs(real(arg,kind=16)).eq.0.0e0_16)then 7697 arg=cmplx(0.0e0_16,imagpart(arg)) 7698 endif 7699 if(arg.eq.(0.0e0_16,0.0e0_16))then 7700 mp_reglogm=(0.0e0_16,0.0e0_16) 7701 else 7702 if(real(arg,kind=16).lt.0.0e0_16.and.imagpart(arg).gt.0.0e0_16)then 7703 mp_reglogm=log(arg) - TWOPII 7704 else 7705 mp_reglogm=log(arg) 7706 endif 7707 endif 7708 end 7709 7710 %(complex_mp_format)s function mp_regsqrt(arg_in) 7711 implicit none 7712 %(complex_mp_format)s arg_in 7713 %(complex_mp_format)s arg 7714 arg=arg_in 7715 if(abs(imagpart(arg)).eq.0.0e0_16)then 7716 arg=cmplx(real(arg,kind=16),0.0e0_16) 7717 endif 7718 if(abs(real(arg,kind=16)).eq.0.0e0_16)then 7719 arg=cmplx(0.0e0_16,imagpart(arg)) 7720 endif 7721 mp_regsqrt=sqrt(arg) 7722 end 7723 7724 %(complex_mp_format)s function mp_grreglog(logsw,expr1_in,expr2_in) 7725 implicit none 7726 %(complex_mp_format)s TWOPII 7727 parameter (TWOPII=2.0e0_16*3.14169258478796109557151794433593750e0_16*(0.0e0_16,1.0e0_16)) 7728 %(complex_mp_format)s expr1_in,expr2_in 7729 %(complex_mp_format)s expr1,expr2 7730 %(real_mp_format)s logsw 7731 %(real_mp_format)s imagexpr 7732 logical firstsheet 7733 expr1=expr1_in 7734 expr2=expr2_in 7735 if(abs(imagpart(expr1)).eq.0.0e0_16)then 7736 expr1=cmplx(real(expr1,kind=16),0.0e0_16) 7737 endif 7738 if(abs(real(expr1,kind=16)).eq.0.0e0_16)then 7739 expr1=cmplx(0.0e0_16,imagpart(expr1)) 7740 endif 7741 if(abs(imagpart(expr2)).eq.0.0e0_16)then 7742 expr2=cmplx(real(expr2,kind=16),0.0e0_16) 7743 endif 7744 if(abs(real(expr2,kind=16)).eq.0.0e0_16)then 7745 expr2=cmplx(0.0e0_16,imagpart(expr2)) 7746 endif 7747 if(expr1.eq.(0.0e0_16,0.0e0_16))then 7748 mp_grreglog=(0.0e0_16,0.0e0_16) 7749 else 7750 imagexpr=imagpart(expr1)*imagpart(expr2) 7751 firstsheet=imagexpr.ge.0.0e0_16 7752 firstsheet=firstsheet.or.real(expr1,kind=16).ge.0.0e0_16 7753 firstsheet=firstsheet.or.real(expr2,kind=16).ge.0.0e0_16 7754 if(firstsheet)then 7755 mp_grreglog=log(expr1) 7756 else 7757 if(imagpart(expr1).gt.0.0e0_16)then 7758 mp_grreglog=log(expr1) - logsw*TWOPII 7759 else 7760 mp_grreglog=log(expr1) + logsw*TWOPII 7761 endif 7762 endif 7763 endif 7764 end 7765 7766 module mp_b0f_caching 7767 7768 type mp_b0f_node 7769 %(complex_mp_format)s p2,m12,m22 7770 %(complex_mp_format)s value 7771 type(mp_b0f_node),pointer::parent 7772 type(mp_b0f_node),pointer::left 7773 type(mp_b0f_node),pointer::right 7774 end type mp_b0f_node 7775 7776 contains 7777 7778 subroutine mp_b0f_search(item, head, find) 7779 implicit none 7780 type(mp_b0f_node),pointer,intent(inout)::head,item 7781 logical,intent(out)::find 7782 type(mp_b0f_node),pointer::item1 7783 integer::icomp 7784 find=.false. 7785 nullify(item%%parent) 7786 nullify(item%%left) 7787 nullify(item%%right) 7788 if(.not.associated(head))then 7789 head => item 7790 return 7791 endif 7792 item1 => head 7793 do 7794 icomp=mp_b0f_node_compare(item,item1) 7795 if(icomp.lt.0)then 7796 if(.not.associated(item1%%left))then 7797 item1%%left => item 7798 item%%parent => item1 7799 exit 7800 else 7801 item1 => item1%%left 7802 endif 7803 elseif(icomp.gt.0)then 7804 if(.not.associated(item1%%right))then 7805 item1%%right => item 7806 item%%parent => item1 7807 exit 7808 else 7809 item1 => item1%%right 7810 endif 7811 else 7812 find=.true. 7813 item%%value=item1%%value 7814 exit 7815 endif 7816 enddo 7817 return 7818 end 7819 7820 integer function mp_b0f_node_compare(item1,item2) result(res) 7821 implicit none 7822 type(mp_b0f_node),pointer,intent(in)::item1,item2 7823 res=mp_complex_compare(item1%%p2,item2%%p2) 7824 if(res.ne.0)return 7825 res=mp_complex_compare(item1%%m22,item2%%m22) 7826 if(res.ne.0)return 7827 res=mp_complex_compare(item1%%m12,item2%%m12) 7828 return 7829 end 7830 7831 integer function mp_real_compare(r1,r2) result(res) 7832 implicit none 7833 %(real_mp_format)s r1,r2 7834 %(real_mp_format)s maxr,diff 7835 %(real_mp_format)s tiny 7836 parameter (tiny=-1.0e-14_16) 7837 maxr=max(abs(r1),abs(r2)) 7838 diff=r1-r2 7839 if(maxr.le.1.0e-99_16.or.abs(diff)/max(maxr,1.0e-99_16).le.abs(tiny))then 7840 res=0 7841 return 7842 endif 7843 if(diff.gt.0.0e0_16)then 7844 res=1 7845 return 7846 else 7847 res=-1 7848 return 7849 endif 7850 end 7851 7852 integer function mp_complex_compare(c1,c2) result(res) 7853 implicit none 7854 %(complex_mp_format)s c1,c2 7855 %(real_mp_format)s r1,r2 7856 r1=real(c1,kind=16) 7857 r2=real(c2,kind=16) 7858 res=mp_real_compare(r1,r2) 7859 if(res.ne.0)return 7860 r1=imagpart(c1) 7861 r2=imagpart(c2) 7862 res=mp_real_compare(r1,r2) 7863 return 7864 end 7865 7866 end module mp_b0f_caching 7867 7868 %(complex_mp_format)s function mp_b0f(p2,m12,m22) 7869 use mp_b0f_caching 7870 implicit none 7871 %(complex_mp_format)s p2,m12,m22 7872 %(complex_mp_format)s zero,TWOPII 7873 parameter (zero=(0.0e0_16,0.0e0_16)) 7874 parameter (TWOPII=2.0e0_16*3.14169258478796109557151794433593750e0_16*(0.0e0_16,1.0e0_16)) 7875 %(real_mp_format)s M,M2,Ga,Ga2 7876 %(real_mp_format)s tiny 7877 parameter (tiny=-1.0e-14_16) 7878 %(complex_mp_format)s logterms 7879 %(complex_mp_format)s mp_log_trajectory 7880 logical use_caching 7881 parameter (use_caching=.true.) 7882 type(mp_b0f_node),pointer::item 7883 type(mp_b0f_node),pointer,save::b0f_bt 7884 integer init 7885 save init 7886 data init /0/ 7887 logical find 7888 IF(m12.eq.zero)THEN 7889 M=real(p2,kind=16) 7890 M2=real(m22,kind=16) 7891 IF(M.LT.tiny.OR.M2.LT.tiny)THEN 7892 WRITE(*,*)'ERROR:MP_B0F is not well defined when M^2,M2^2<0' 7893 STOP 7894 ENDIF 7895 M=sqrt(abs(M)) 7896 M2=sqrt(abs(M2)) 7897 IF(M.EQ.0.0e0_16)THEN 7898 Ga=0.0e0_16 7899 ELSE 7900 Ga=-imagpart(p2)/M 7901 ENDIF 7902 IF(M2.EQ.0.0e0_16)THEN 7903 Ga2=0.0e0_16 7904 ELSE 7905 Ga2=-imagpart(m22)/M2 7906 ENDIF 7907 IF(p2.NE.m22.AND.p2.NE.zero.AND.m22.NE.zero)THEN 7908 mp_b0f=(m22-p2)/p2*log((m22-p2)/m22) 7909 IF(M.GT.M2.AND.Ga*M2.GT.Ga2*M)THEN 7910 mp_b0f=mp_b0f-TWOPII 7911 ENDIF 7912 RETURN 7913 ELSE 7914 WRITE(*,*)'ERROR:MP_B0F is not supported for a simple form' 7915 STOP 7916 ENDIF 7917 ENDIF 7918 if(use_caching)then 7919 if(init.eq.0)then 7920 nullify(b0f_bt) 7921 init=1 7922 endif 7923 allocate(item) 7924 item%%p2=p2 7925 item%%m12=m12 7926 item%%m22=m22 7927 find=.false. 7928 call mp_b0f_search(item, b0f_bt, find) 7929 if(find)then 7930 mp_b0f=item%%value 7931 deallocate(item) 7932 return 7933 else 7934 logterms=mp_log_trajectory(100,p2,m12,m22) 7935 mp_b0f=-LOG(p2/m22)+logterms 7936 item%%value=mp_b0f 7937 return 7938 endif 7939 else 7940 logterms=mp_log_trajectory(100,p2,m12,m22) 7941 mp_b0f=-LOG(p2/m22)+logterms 7942 endif 7943 RETURN 7944 end 7945 7946 %(complex_mp_format)s function mp_sqrt_trajectory(n_seg,p2,m12,m22) 7947 implicit none 7948 integer n_seg 7949 %(complex_mp_format)s p2,m12,m22 7950 %(complex_mp_format)s zero,one 7951 parameter (zero=(0.0e0_16,0.0e0_16),one=(1.0e0_16,0.0e0_16)) 7952 %(complex_mp_format)s gamma0,gamma1 7953 %(real_mp_format)s M,Ga,dGa,Ga_start 7954 %(real_mp_format)s Gai,intersection 7955 %(complex_mp_format)s argim1,argi,p2i 7956 %(complex_mp_format)s gamma0i,gamma1i 7957 %(real_mp_format)s tiny 7958 parameter (tiny=-1.0e-24_16) 7959 integer i 7960 %(real_mp_format)s prefactor 7961 IF(ABS(p2*m12*m22).EQ.0.0e0_16)THEN 7962 WRITE(*,*)'ERROR:mp_sqrt_trajectory works when p2*m12*m22/=0' 7963 STOP 7964 ENDIF 7965 M=real(p2,kind=16) 7966 M=sqrt(abs(M)) 7967 IF(M.EQ.0.0e0_16)THEN 7968 Ga=0.0e0_16 7969 ELSE 7970 Ga=-imagpart(p2)/M 7971 ENDIF 7972 gamma0=one+m12/p2-m22/p2 7973 gamma1=m12/p2-cmplx(0.0e0_16,1.0e0_16)*abs(tiny)/p2 7974 IF(abs(Ga).EQ.0.0e0_16)THEN 7975 mp_sqrt_trajectory=sqrt(gamma0**2-4.0e0_16*gamma1) 7976 RETURN 7977 ENDIF 7978 Ga_start=-abs(tiny*Ga) 7979 dGa=(Ga-Ga_start)/n_seg 7980 prefactor=1.0e0_16 7981 Gai=Ga_start 7982 p2i=cmplx(M**2,-Gai*M) 7983 gamma0i=one+m12/p2i-m22/p2i 7984 gamma1i=m12/p2i-cmplx(0.0e0_16,1.0e0_16)*abs(tiny)/p2i 7985 argim1=gamma0i**2-4.0e0_16*gamma1i 7986 DO i=1,n_seg 7987 Gai=dGa*i+Ga_start 7988 p2i=cmplx(M**2,-Gai*M) 7989 gamma0i=one+m12/p2i-m22/p2i 7990 gamma1i=m12/p2i-cmplx(0.0e0_16,1.0e0_16)*abs(tiny)/p2i 7991 argi=gamma0i**2-4.0e0_16*gamma1i 7992 IF(imagpart(argi)*imagpart(argim1).LT.0.0e0_16)THEN 7993 intersection=imagpart(argim1)*(real(argi,kind=16)-real(argim1,kind=16)) 7994 intersection=intersection/(imagpart(argi)-imagpart(argim1)) 7995 intersection=intersection-real(argim1,kind=16) 7996 IF(intersection.GT.0.0e0_16)THEN 7997 prefactor=-prefactor 7998 ENDIF 7999 ENDIF 8000 argim1=argi 8001 ENDDO 8002 mp_sqrt_trajectory=sqrt(gamma0**2-4.0e0_16*gamma1)*prefactor 8003 RETURN 8004 end 8005 8006 %(complex_mp_format)s function mp_log_trajectory(n_seg,p2,m12,m22) 8007 implicit none 8008 integer n_seg 8009 %(complex_mp_format)s p2,m12,m22 8010 %(complex_mp_format)s zero,one,half,TWOPII 8011 parameter (zero=(0.0e0_16,0.0e0_16),one=(1.0e0_16,0.0e0_16)) 8012 parameter (half=(0.5e0_16,0.0e0_16)) 8013 parameter (TWOPII=2.0e0_16*3.14169258478796109557151794433593750e0_16*(0.0e0_16,1.0e0_16)) 8014 %(complex_mp_format)s gamma0,gammap,gammam,sqrtterm 8015 %(real_mp_format)s M,Ga,dGa,Ga_start 8016 %(real_mp_format)s Gai,intersection 8017 %(complex_mp_format)s argim1(4),argi(4),p2i,sqrttermi 8018 %(complex_mp_format)s gamma0i,gammapi,gammami 8019 %(real_mp_format)s tiny 8020 parameter (tiny=-1.0e-14_16) 8021 integer i,j 8022 %(complex_mp_format)s addfactor(4) 8023 %(complex_mp_format)s mp_sqrt_trajectory 8024 IF(abs(p2*m12*m22).eq.0.0e0_16)THEN 8025 WRITE(*,*)'ERROR:mp_log_trajectory works when p2*m12*m22/=0' 8026 STOP 8027 ENDIF 8028 M=real(p2,kind=16) 8029 M=sqrt(abs(M)) 8030 IF(M.eq.0.0e0_16)THEN 8031 Ga=0.0e0_16 8032 ELSE 8033 Ga=-imagpart(p2)/M 8034 ENDIF 8035 sqrtterm=mp_sqrt_trajectory(n_seg,p2,m12,m22) 8036 gamma0=one+m12/p2-m22/p2 8037 gammap=half*(gamma0+sqrtterm) 8038 gammam=half*(gamma0-sqrtterm) 8039 IF(abs(Ga).EQ.0.0e0_16)THEN 8040 mp_log_trajectory=-LOG(gammap-one)-LOG(gammam-one)+gammap*LOG((gammap-one)/gammap)+gammam*LOG((gammam-one)/gammam) 8041 RETURN 8042 ENDIF 8043 Ga_start=-abs(tiny*Ga) 8044 dGa=(Ga-Ga_start)/n_seg 8045 addfactor(1:4)=zero 8046 Gai=Ga_start 8047 p2i=cmplx(M**2,-Gai*M) 8048 sqrttermi=mp_sqrt_trajectory(n_seg,p2i,m12,m22) 8049 gamma0i=one+m12/p2i-m22/p2i 8050 gammapi=half*(gamma0i+sqrttermi) 8051 gammami=half*(gamma0i-sqrttermi) 8052 argim1(1)=gammapi-one 8053 argim1(2)=gammami-one 8054 argim1(3)=(gammapi-one)/gammapi 8055 argim1(4)=(gammami-one)/gammami 8056 DO i=1,n_seg 8057 Gai=dGa*i+Ga_start 8058 p2i=cmplx(M**2,-Gai*M) 8059 sqrttermi=mp_sqrt_trajectory(n_seg,p2i,m12,m22) 8060 gamma0i=one+m12/p2i-m22/p2i 8061 gammapi=half*(gamma0i+sqrttermi) 8062 gammami=half*(gamma0i-sqrttermi) 8063 argi(1)=gammapi-one 8064 argi(2)=gammami-one 8065 argi(3)=(gammapi-one)/gammapi 8066 argi(4)=(gammami-one)/gammami 8067 DO j=1,4 8068 IF(imagpart(argi(j))*imagpart(argim1(j)).LT.0.0e0_16)THEN 8069 intersection=imagpart(argim1(j))*(real(argi(j),kind=16)-real(argim1(j),kind=16)) 8070 intersection=intersection/(imagpart(argi(j))-imagpart(argim1(j))) 8071 intersection=intersection-real(argim1(j),kind=16) 8072 IF(intersection.GT.0.0e0_16)THEN 8073 IF(imagpart(argim1(j)).LT.0.0e0_16)THEN 8074 addfactor(j)=addfactor(j)-TWOPII 8075 ELSE 8076 addfactor(j)=addfactor(j)+TWOPII 8077 ENDIF 8078 ENDIF 8079 ENDIF 8080 argim1(j)=argi(j) 8081 ENDDO 8082 ENDDO 8083 mp_log_trajectory=-(LOG(gammap-one)+addfactor(1))-(LOG(gammam-one)+addfactor(2)) 8084 mp_log_trajectory=mp_log_trajectory+gammap*(LOG((gammap-one)/gammap)+addfactor(3)) 8085 mp_log_trajectory=mp_log_trajectory+gammam*(LOG((gammam-one)/gammam)+addfactor(4)) 8086 RETURN 8087 end 8088 8089 %(complex_mp_format)s function mp_arg(comnum) 8090 implicit none 8091 %(complex_mp_format)s comnum 8092 %(complex_mp_format)s imm 8093 imm = (0.0e0_16,1.0e0_16) 8094 if(comnum.eq.(0.0e0_16,0.0e0_16)) then 8095 mp_arg=(0.0e0_16,0.0e0_16) 8096 else 8097 mp_arg=log(comnum/abs(comnum))/imm 8098 endif 8099 end"""%{'complex_mp_format':self.mp_complex_format,'real_mp_format':self.mp_real_format}) 8100 8101 8102 #check for the file functions.f 8103 model_path = self.model.get('modelpath') 8104 if os.path.exists(pjoin(model_path,'Fortran','functions.f')): 8105 fsock.write_comment_line(' USER DEFINE FUNCTIONS ') 8106 input = pjoin(model_path,'Fortran','functions.f') 8107 fsock.writelines(open(input).read()) 8108 fsock.write_comment_line(' END USER DEFINE FUNCTIONS ') 8109 8110 # check for functions define in the UFO model 8111 ufo_fct = self.model.get('functions') 8112 if ufo_fct: 8113 fsock.write_comment_line(' START UFO DEFINE FUNCTIONS ') 8114 done = [] 8115 for fct in ufo_fct: 8116 # already handle by default 8117 if str(fct.name.lower()) not in ["complexconjugate", "re", "im", "sec", "csc", "asec", "acsc", "condif", 8118 "theta_function", "cond", "reglog", "reglogp", "reglogm", "recms","arg", 8119 "grreglog","regsqrt","B0F","sqrt_trajectory","log_trajectory"]: 8120 8121 ufo_fct_template = """ 8122 double complex function %(name)s(%(args)s) 8123 implicit none 8124 double complex %(args)s 8125 %(definitions)s 8126 %(name)s = %(fct)s 8127 8128 return 8129 end 8130 """ 8131 str_fct = self.p_to_f.parse(fct.expr) 8132 if not self.p_to_f.to_define: 8133 definitions = [] 8134 else: 8135 definitions=[] 8136 for d in self.p_to_f.to_define: 8137 if d == 'pi': 8138 definitions.append(' double precision pi') 8139 definitions.append(' data pi /3.1415926535897932d0/') 8140 else: 8141 definitions.append(' double complex %s' % d) 8142 8143 text = ufo_fct_template % { 8144 'name': fct.name, 8145 'args': ", ".join(fct.arguments), 8146 'fct': str_fct, 8147 'definitions': '\n'.join(definitions) 8148 } 8149 8150 fsock.writelines(text) 8151 if self.opt['mp']: 8152 fsock.write_comment_line(' START UFO DEFINE FUNCTIONS FOR MP') 8153 for fct in ufo_fct: 8154 # already handle by default 8155 if fct.name not in ["complexconjugate", "re", "im", "sec", "csc", "asec", "acsc","condif", 8156 "theta_function", "cond", "reglog", "reglogp","reglogm", "recms","arg", 8157 "grreglog","regsqrt","B0F","sqrt_trajectory","log_trajectory"]: 8158 8159 ufo_fct_template = """ 8160 %(complex_mp_format)s function mp_%(name)s(mp__%(args)s) 8161 implicit none 8162 %(complex_mp_format)s mp__%(args)s 8163 %(definitions)s 8164 mp_%(name)s = %(fct)s 8165 8166 return 8167 end 8168 """ 8169 str_fct = self.mp_p_to_f.parse(fct.expr) 8170 if not self.mp_p_to_f.to_define: 8171 definitions = [] 8172 else: 8173 definitions=[] 8174 for d in self.mp_p_to_f.to_define: 8175 if d == 'pi': 8176 definitions.append(' %s mp__pi' % self.mp_real_format) 8177 definitions.append(' data mp__pi /3.141592653589793238462643383279502884197e+00_16/') 8178 else: 8179 definitions.append(' %s mp_%s' % (self.mp_complex_format,d)) 8180 text = ufo_fct_template % { 8181 'name': fct.name, 8182 'args': ", mp__".join(fct.arguments), 8183 'fct': str_fct, 8184 'definitions': '\n'.join(definitions), 8185 'complex_mp_format': self.mp_complex_format 8186 } 8187 fsock.writelines(text) 8188 8189 8190 8191 fsock.write_comment_line(' STOP UFO DEFINE FUNCTIONS ')
8192 8193 8194
8195 - def create_makeinc(self):
8196 """create makeinc.inc containing the file to compile """ 8197 8198 fsock = self.open('makeinc.inc', comment='#') 8199 text = 'MODEL = couplings.o lha_read.o printout.o rw_para.o' 8200 text += ' model_functions.o ' 8201 8202 nb_coup_indep = 1 + len(self.coups_dep) // 25 8203 nb_coup_dep = 1 + len(self.coups_indep) // 25 8204 couplings_files=['couplings%s.o' % (i+1) \ 8205 for i in range(nb_coup_dep + nb_coup_indep) ] 8206 if self.opt['mp']: 8207 couplings_files+=['mp_couplings%s.o' % (i+1) for i in \ 8208 range(nb_coup_dep,nb_coup_dep + nb_coup_indep) ] 8209 text += ' '.join(couplings_files) 8210 fsock.writelines(text)
8211
8212 - def create_param_write(self):
8213 """ create param_write """ 8214 8215 fsock = self.open('param_write.inc', format='fortran') 8216 8217 fsock.writelines("""write(*,*) ' External Params' 8218 write(*,*) ' ---------------------------------' 8219 write(*,*) ' '""") 8220 def format(name): 8221 return 'write(*,*) \'%(name)s = \', %(name)s' % {'name': name}
8222 8223 # Write the external parameter 8224 lines = [format(param.name) for param in self.params_ext] 8225 fsock.writelines('\n'.join(lines)) 8226 8227 fsock.writelines("""write(*,*) ' Internal Params' 8228 write(*,*) ' ---------------------------------' 8229 write(*,*) ' '""") 8230 lines = [format(data.name) for data in self.params_indep 8231 if data.name != 'ZERO' and self.check_needed_param(data.name)] 8232 fsock.writelines('\n'.join(lines)) 8233 fsock.writelines("""write(*,*) ' Internal Params evaluated point by point' 8234 write(*,*) ' ----------------------------------------' 8235 write(*,*) ' '""") 8236 lines = [format(data.name) for data in self.params_dep \ 8237 if self.check_needed_param(data.name)] 8238 8239 fsock.writelines('\n'.join(lines)) 8240 8241 8242
8243 - def create_ident_card(self):
8244 """ create the ident_card.dat """ 8245 8246 def format(parameter): 8247 """return the line for the ident_card corresponding to this parameter""" 8248 colum = [parameter.lhablock.lower()] + \ 8249 [str(value) for value in parameter.lhacode] + \ 8250 [parameter.name] 8251 if not parameter.name: 8252 return '' 8253 return ' '.join(colum)+'\n'
8254 8255 fsock = self.open('ident_card.dat') 8256 8257 external_param = [format(param) for param in self.params_ext] 8258 fsock.writelines('\n'.join(external_param)) 8259
8260 - def create_actualize_mp_ext_param_inc(self):
8261 """ create the actualize_mp_ext_params.inc code """ 8262 8263 # In principle one should actualize all external, but for now, it is 8264 # hardcoded that only AS and MU_R can by dynamically changed by the user 8265 # so that we only update those ones. 8266 # Of course, to be on the safe side, one could decide to update all 8267 # external parameters. 8268 update_params_list=[p for p in self.params_ext if p.name in 8269 self.PS_dependent_key] 8270 8271 res_strings = ["%(mp_prefix)s%(name)s=%(name)s"\ 8272 %{'mp_prefix':self.mp_prefix,'name':param.name}\ 8273 for param in update_params_list] 8274 # When read_lha is false, it is G which is taken in input and not AS, so 8275 # this is what should be reset here too. 8276 if 'aS' in [param.name for param in update_params_list]: 8277 res_strings.append("%(mp_prefix)sG=G"%{'mp_prefix':self.mp_prefix}) 8278 8279 fsock = self.open('actualize_mp_ext_params.inc', format='fortran') 8280 fsock.writelines('\n'.join(res_strings))
8281
8282 - def create_param_read(self):
8283 """create param_read""" 8284 8285 if self.opt['export_format'] in ['madevent', 'FKS5_default', 'FKS5_optimized'] \ 8286 or self.opt['loop_induced']: 8287 fsock = self.open('param_read.inc', format='fortran') 8288 fsock.writelines(' include \'../param_card.inc\'') 8289 return 8290 8291 def format_line(parameter): 8292 """return the line for the ident_card corresponding to this 8293 parameter""" 8294 template = \ 8295 """ call LHA_get_real(npara,param,value,'%(name)s',%(name)s,%(value)s)""" \ 8296 % {'name': parameter.name, 8297 'value': self.p_to_f.parse(str(parameter.value.real))} 8298 if self.opt['mp']: 8299 template = template+ \ 8300 ("\n call MP_LHA_get_real(npara,param,value,'%(name)s',"+ 8301 "%(mp_prefix)s%(name)s,%(value)s)") \ 8302 % {'name': parameter.name,'mp_prefix': self.mp_prefix, 8303 'value': self.mp_p_to_f.parse(str(parameter.value.real))} 8304 8305 if parameter.lhablock.lower() == 'loop': 8306 template = template.replace('LHA_get_real', 'LHA_get_real_silent') 8307 8308 return template 8309 8310 fsock = self.open('param_read.inc', format='fortran') 8311 res_strings = [format_line(param) \ 8312 for param in self.params_ext] 8313 8314 # Correct width sign for Majorana particles (where the width 8315 # and mass need to have the same sign) 8316 for particle in self.model.get('particles'): 8317 if particle.is_fermion() and particle.get('self_antipart') and \ 8318 particle.get('width').lower() != 'zero': 8319 8320 res_strings.append('%(width)s = sign(%(width)s,%(mass)s)' % \ 8321 {'width': particle.get('width'), 'mass': particle.get('mass')}) 8322 if self.opt['mp']: 8323 res_strings.append(\ 8324 ('%(mp_pref)s%(width)s = sign(%(mp_pref)s%(width)s,'+\ 8325 '%(mp_pref)s%(mass)s)')%{'width': particle.get('width'),\ 8326 'mass': particle.get('mass'),'mp_pref':self.mp_prefix}) 8327 8328 fsock.writelines('\n'.join(res_strings)) 8329 8330 8331 @staticmethod
8332 - def create_param_card_static(model, output_path, rule_card_path=False, 8333 mssm_convert=True, write_special=True):
8334 """ create the param_card.dat for a givent model --static method-- """ 8335 #1. Check if a default param_card is present: 8336 done = False 8337 if hasattr(model, 'restrict_card') and isinstance(model.restrict_card, str): 8338 restrict_name = os.path.basename(model.restrict_card)[9:-4] 8339 model_path = model.get('modelpath') 8340 if os.path.exists(pjoin(model_path,'paramcard_%s.dat' % restrict_name)): 8341 done = True 8342 files.cp(pjoin(model_path,'paramcard_%s.dat' % restrict_name), 8343 output_path) 8344 if not done: 8345 param_writer.ParamCardWriter(model, output_path, write_special=write_special) 8346 8347 if rule_card_path: 8348 if hasattr(model, 'rule_card'): 8349 model.rule_card.write_file(rule_card_path) 8350 8351 if mssm_convert: 8352 model_name = model.get('name') 8353 # IF MSSM convert the card to SLAH1 8354 if model_name == 'mssm' or model_name.startswith('mssm-'): 8355 import models.check_param_card as translator 8356 # Check the format of the param_card for Pythia and make it correct 8357 if rule_card_path: 8358 translator.make_valid_param_card(output_path, rule_card_path) 8359 translator.convert_to_slha1(output_path)
8360
8361 - def create_param_card(self, write_special=True):
8362 """ create the param_card.dat """ 8363 8364 rule_card = pjoin(self.dir_path, 'param_card_rule.dat') 8365 if not hasattr(self.model, 'rule_card'): 8366 rule_card=False 8367 write_special = True 8368 if 'exporter' in self.opt: 8369 import madgraph.loop.loop_exporters as loop_exporters 8370 import madgraph.iolibs.export_fks as export_fks 8371 write_special = False 8372 if issubclass(self.opt['exporter'], loop_exporters.LoopProcessExporterFortranSA): 8373 write_special = True 8374 if issubclass(self.opt['exporter'],(loop_exporters.LoopInducedExporterME,export_fks.ProcessExporterFortranFKS)): 8375 write_special = False 8376 8377 self.create_param_card_static(self.model, 8378 output_path=pjoin(self.dir_path, 'param_card.dat'), 8379 rule_card_path=rule_card, 8380 mssm_convert=True, 8381 write_special=write_special)
8382
8383 -def ExportV4Factory(cmd, noclean, output_type='default', group_subprocesses=True, cmd_options={}):
8384 """ Determine which Export_v4 class is required. cmd is the command 8385 interface containing all potential usefull information. 8386 The output_type argument specifies from which context the output 8387 is called. It is 'madloop' for MadLoop5, 'amcatnlo' for FKS5 output 8388 and 'default' for tree-level outputs.""" 8389 8390 opt = dict(cmd.options) 8391 opt['output_options'] = cmd_options 8392 8393 # ========================================================================== 8394 # First check whether Ninja must be installed. 8395 # Ninja would only be required if: 8396 # a) Loop optimized output is selected 8397 # b) the process gathered from the amplitude generated use loops 8398 8399 if len(cmd._curr_amps)>0: 8400 try: 8401 curr_proc = cmd._curr_amps[0].get('process') 8402 except base_objects.PhysicsObject.PhysicsObjectError: 8403 curr_proc = None 8404 elif hasattr(cmd,'_fks_multi_proc') and \ 8405 len(cmd._fks_multi_proc.get('process_definitions'))>0: 8406 curr_proc = cmd._fks_multi_proc.get('process_definitions')[0] 8407 else: 8408 curr_proc = None 8409 8410 requires_reduction_tool = opt['loop_optimized_output'] and \ 8411 (not curr_proc is None) and \ 8412 (curr_proc.get('perturbation_couplings') != [] and \ 8413 not curr_proc.get('NLO_mode') in [None,'real','tree','LO','LOonly']) 8414 8415 # An installation is required then, but only if the specified path is the 8416 # default local one and that the Ninja library appears missing. 8417 if requires_reduction_tool: 8418 cmd.install_reduction_library() 8419 8420 # ========================================================================== 8421 # First treat the MadLoop5 standalone case 8422 MadLoop_SA_options = {'clean': not noclean, 8423 'complex_mass':cmd.options['complex_mass_scheme'], 8424 'export_format':'madloop', 8425 'mp':True, 8426 'loop_dir': os.path.join(cmd._mgme_dir,'Template','loop_material'), 8427 'cuttools_dir': cmd._cuttools_dir, 8428 'iregi_dir':cmd._iregi_dir, 8429 'golem_dir':cmd.options['golem'], 8430 'samurai_dir':cmd.options['samurai'], 8431 'ninja_dir':cmd.options['ninja'], 8432 'collier_dir':cmd.options['collier'], 8433 'fortran_compiler':cmd.options['fortran_compiler'], 8434 'f2py_compiler':cmd.options['f2py_compiler'], 8435 'output_dependencies':cmd.options['output_dependencies'], 8436 'SubProc_prefix':'P', 8437 'compute_color_flows':cmd.options['loop_color_flows'], 8438 'mode': 'reweight' if cmd._export_format == "standalone_rw" else '', 8439 'cluster_local_path': cmd.options['cluster_local_path'], 8440 'output_options': cmd_options 8441 } 8442 8443 if output_type.startswith('madloop'): 8444 import madgraph.loop.loop_exporters as loop_exporters 8445 if os.path.isdir(os.path.join(cmd._mgme_dir, 'Template/loop_material')): 8446 ExporterClass=None 8447 if not cmd.options['loop_optimized_output']: 8448 ExporterClass=loop_exporters.LoopProcessExporterFortranSA 8449 else: 8450 if output_type == "madloop": 8451 ExporterClass=loop_exporters.LoopProcessOptimizedExporterFortranSA 8452 MadLoop_SA_options['export_format'] = 'madloop_optimized' 8453 elif output_type == "madloop_matchbox": 8454 ExporterClass=loop_exporters.LoopProcessExporterFortranMatchBox 8455 MadLoop_SA_options['export_format'] = 'madloop_matchbox' 8456 else: 8457 raise Exception("output_type not recognize %s" % output_type) 8458 return ExporterClass(cmd._export_dir, MadLoop_SA_options) 8459 else: 8460 raise MadGraph5Error('MG5_aMC cannot find the \'loop_material\' directory'+\ 8461 ' in %s'%str(cmd._mgme_dir)) 8462 8463 # Then treat the aMC@NLO output 8464 elif output_type=='amcatnlo': 8465 import madgraph.iolibs.export_fks as export_fks 8466 ExporterClass=None 8467 amcatnlo_options = dict(opt) 8468 amcatnlo_options.update(MadLoop_SA_options) 8469 amcatnlo_options['mp'] = len(cmd._fks_multi_proc.get_virt_amplitudes()) > 0 8470 if not cmd.options['loop_optimized_output']: 8471 logger.info("Writing out the aMC@NLO code") 8472 ExporterClass = export_fks.ProcessExporterFortranFKS 8473 amcatnlo_options['export_format']='FKS5_default' 8474 else: 8475 logger.info("Writing out the aMC@NLO code, using optimized Loops") 8476 ExporterClass = export_fks.ProcessOptimizedExporterFortranFKS 8477 amcatnlo_options['export_format']='FKS5_optimized' 8478 return ExporterClass(cmd._export_dir, amcatnlo_options) 8479 8480 8481 # Then the default tree-level output 8482 elif output_type=='default': 8483 assert group_subprocesses in [True, False] 8484 8485 opt = dict(opt) 8486 opt.update({'clean': not noclean, 8487 'complex_mass': cmd.options['complex_mass_scheme'], 8488 'export_format':cmd._export_format, 8489 'mp': False, 8490 'sa_symmetry':False, 8491 'model': cmd._curr_model.get('name'), 8492 'v5_model': False if cmd._model_v4_path else True }) 8493 8494 format = cmd._export_format #shortcut 8495 8496 if format in ['standalone_msP', 'standalone_msF', 'standalone_rw']: 8497 opt['sa_symmetry'] = True 8498 elif format == 'plugin': 8499 opt['sa_symmetry'] = cmd._export_plugin.sa_symmetry 8500 8501 loop_induced_opt = dict(opt) 8502 loop_induced_opt.update(MadLoop_SA_options) 8503 loop_induced_opt['export_format'] = 'madloop_optimized' 8504 loop_induced_opt['SubProc_prefix'] = 'PV' 8505 # For loop_induced output with MadEvent, we must have access to the 8506 # color flows. 8507 loop_induced_opt['compute_color_flows'] = True 8508 for key in opt: 8509 if key not in loop_induced_opt: 8510 loop_induced_opt[key] = opt[key] 8511 8512 # Madevent output supports MadAnalysis5 8513 if format in ['madevent']: 8514 opt['madanalysis5'] = cmd.options['madanalysis5_path'] 8515 8516 if format == 'matrix' or format.startswith('standalone'): 8517 return ProcessExporterFortranSA(cmd._export_dir, opt, format=format) 8518 8519 elif format in ['madevent'] and group_subprocesses: 8520 if isinstance(cmd._curr_amps[0], 8521 loop_diagram_generation.LoopAmplitude): 8522 import madgraph.loop.loop_exporters as loop_exporters 8523 return loop_exporters.LoopInducedExporterMEGroup( 8524 cmd._export_dir,loop_induced_opt) 8525 else: 8526 return ProcessExporterFortranMEGroup(cmd._export_dir,opt) 8527 elif format in ['madevent']: 8528 if isinstance(cmd._curr_amps[0], 8529 loop_diagram_generation.LoopAmplitude): 8530 import madgraph.loop.loop_exporters as loop_exporters 8531 return loop_exporters.LoopInducedExporterMENoGroup( 8532 cmd._export_dir,loop_induced_opt) 8533 else: 8534 return ProcessExporterFortranME(cmd._export_dir,opt) 8535 elif format in ['matchbox']: 8536 return ProcessExporterFortranMatchBox(cmd._export_dir,opt) 8537 elif cmd._export_format in ['madweight'] and group_subprocesses: 8538 8539 return ProcessExporterFortranMWGroup(cmd._export_dir, opt) 8540 elif cmd._export_format in ['madweight']: 8541 return ProcessExporterFortranMW(cmd._export_dir, opt) 8542 elif format == 'plugin': 8543 if isinstance(cmd._curr_amps[0], 8544 loop_diagram_generation.LoopAmplitude): 8545 return cmd._export_plugin(cmd._export_dir, loop_induced_opt) 8546 else: 8547 return cmd._export_plugin(cmd._export_dir, opt) 8548 8549 else: 8550 raise Exception('Wrong export_v4 format') 8551 else: 8552 raise MadGraph5Error('Output type %s not reckognized in ExportV4Factory.')
8553
8554 8555 8556 8557 #=============================================================================== 8558 # ProcessExporterFortranMWGroup 8559 #=============================================================================== 8560 -class ProcessExporterFortranMWGroup(ProcessExporterFortranMW):
8561 """Class to take care of exporting a set of matrix elements to 8562 MadEvent subprocess group format.""" 8563 8564 matrix_file = "matrix_madweight_group_v4.inc" 8565 grouped_mode = 'madweight' 8566 #=========================================================================== 8567 # generate_subprocess_directory 8568 #===========================================================================
8569 - def generate_subprocess_directory(self, subproc_group, 8570 fortran_model, 8571 group_number):
8572 """Generate the Pn directory for a subprocess group in MadEvent, 8573 including the necessary matrix_N.f files, configs.inc and various 8574 other helper files.""" 8575 8576 if not isinstance(subproc_group, group_subprocs.SubProcessGroup): 8577 raise base_objects.PhysicsObject.PhysicsObjectError("subproc_group object not SubProcessGroup") 8578 8579 if not self.model: 8580 self.model = subproc_group.get('matrix_elements')[0].\ 8581 get('processes')[0].get('model') 8582 8583 pathdir = os.path.join(self.dir_path, 'SubProcesses') 8584 8585 # Create the directory PN in the specified path 8586 subprocdir = "P%d_%s" % (subproc_group.get('number'), 8587 subproc_group.get('name')) 8588 try: 8589 os.mkdir(pjoin(pathdir, subprocdir)) 8590 except os.error as error: 8591 logger.warning(error.strerror + " " + subprocdir) 8592 8593 8594 logger.info('Creating files in directory %s' % subprocdir) 8595 Ppath = pjoin(pathdir, subprocdir) 8596 8597 # Create the matrix.f files, auto_dsig.f files and all inc files 8598 # for all subprocesses in the group 8599 8600 maxamps = 0 8601 maxflows = 0 8602 tot_calls = 0 8603 8604 matrix_elements = subproc_group.get('matrix_elements') 8605 8606 for ime, matrix_element in \ 8607 enumerate(matrix_elements): 8608 filename = pjoin(Ppath, 'matrix%d.f' % (ime+1)) 8609 calls, ncolor = \ 8610 self.write_matrix_element_v4(writers.FortranWriter(filename), 8611 matrix_element, 8612 fortran_model, 8613 str(ime+1), 8614 subproc_group.get('diagram_maps')[\ 8615 ime]) 8616 8617 filename = pjoin(Ppath, 'auto_dsig%d.f' % (ime+1)) 8618 self.write_auto_dsig_file(writers.FortranWriter(filename), 8619 matrix_element, 8620 str(ime+1)) 8621 8622 # Keep track of needed quantities 8623 tot_calls += int(calls) 8624 maxflows = max(maxflows, ncolor) 8625 maxamps = max(maxamps, len(matrix_element.get('diagrams'))) 8626 8627 # Draw diagrams 8628 filename = pjoin(Ppath, "matrix%d.ps" % (ime+1)) 8629 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\ 8630 get('diagrams'), 8631 filename, 8632 model = \ 8633 matrix_element.get('processes')[0].\ 8634 get('model'), 8635 amplitude=True) 8636 logger.info("Generating Feynman diagrams for " + \ 8637 matrix_element.get('processes')[0].nice_string()) 8638 plot.draw() 8639 8640 # Extract number of external particles 8641 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 8642 8643 # Generate a list of diagrams corresponding to each configuration 8644 # [[d1, d2, ...,dn],...] where 1,2,...,n is the subprocess number 8645 # If a subprocess has no diagrams for this config, the number is 0 8646 8647 subproc_diagrams_for_config = subproc_group.get('diagrams_for_configs') 8648 8649 filename = pjoin(Ppath, 'auto_dsig.f') 8650 self.write_super_auto_dsig_file(writers.FortranWriter(filename), 8651 subproc_group) 8652 8653 filename = pjoin(Ppath,'configs.inc') 8654 nconfigs, s_and_t_channels = self.write_configs_file(\ 8655 writers.FortranWriter(filename), 8656 subproc_group, 8657 subproc_diagrams_for_config) 8658 8659 filename = pjoin(Ppath, 'leshouche.inc') 8660 self.write_leshouche_file(writers.FortranWriter(filename), 8661 subproc_group) 8662 8663 filename = pjoin(Ppath, 'phasespace.inc') 8664 self.write_phasespace_file(writers.FortranWriter(filename), 8665 nconfigs) 8666 8667 8668 filename = pjoin(Ppath, 'maxamps.inc') 8669 self.write_maxamps_file(writers.FortranWriter(filename), 8670 maxamps, 8671 maxflows, 8672 max([len(me.get('processes')) for me in \ 8673 matrix_elements]), 8674 len(matrix_elements)) 8675 8676 filename = pjoin(Ppath, 'mirrorprocs.inc') 8677 self.write_mirrorprocs(writers.FortranWriter(filename), 8678 subproc_group) 8679 8680 filename = pjoin(Ppath, 'nexternal.inc') 8681 self.write_nexternal_file(writers.FortranWriter(filename), 8682 nexternal, ninitial) 8683 8684 filename = pjoin(Ppath, 'pmass.inc') 8685 self.write_pmass_file(writers.FortranWriter(filename), 8686 matrix_element) 8687 8688 filename = pjoin(Ppath, 'props.inc') 8689 self.write_props_file(writers.FortranWriter(filename), 8690 matrix_element, 8691 s_and_t_channels) 8692 8693 # filename = pjoin(Ppath, 'processes.dat') 8694 # files.write_to_file(filename, 8695 # self.write_processes_file, 8696 # subproc_group) 8697 8698 # Generate jpgs -> pass in make_html 8699 #os.system(os.path.join('..', '..', 'bin', 'gen_jpeg-pl')) 8700 8701 linkfiles = ['driver.f', 'cuts.f', 'initialization.f','gen_ps.f', 'makefile', 'coupl.inc','madweight_param.inc', 'run.inc', 'setscales.f'] 8702 8703 for file in linkfiles: 8704 ln('../%s' % file, cwd=Ppath) 8705 8706 ln('nexternal.inc', '../../Source', cwd=Ppath, log=False) 8707 ln('leshouche.inc', '../../Source', cwd=Ppath, log=False) 8708 ln('maxamps.inc', '../../Source', cwd=Ppath, log=False) 8709 ln('../../Source/maxparticles.inc', '.', log=True, cwd=Ppath) 8710 ln('../../Source/maxparticles.inc', '.', name='genps.inc', log=True, cwd=Ppath) 8711 ln('phasespace.inc', '../', log=True, cwd=Ppath) 8712 if not tot_calls: 8713 tot_calls = 0 8714 return tot_calls
8715 8716 8717 #=========================================================================== 8718 # Helper functions 8719 #===========================================================================
8720 - def modify_grouping(self, matrix_element):
8721 """allow to modify the grouping (if grouping is in place) 8722 return two value: 8723 - True/False if the matrix_element was modified 8724 - the new(or old) matrix element""" 8725 8726 return True, matrix_element.split_lepton_grouping()
8727 8728 #=========================================================================== 8729 # write_super_auto_dsig_file 8730 #===========================================================================
8731 - def write_super_auto_dsig_file(self, writer, subproc_group):
8732 """Write the auto_dsig.f file selecting between the subprocesses 8733 in subprocess group mode""" 8734 8735 replace_dict = {} 8736 8737 # Extract version number and date from VERSION file 8738 info_lines = self.get_mg5_info_lines() 8739 replace_dict['info_lines'] = info_lines 8740 8741 matrix_elements = subproc_group.get('matrix_elements') 8742 8743 # Extract process info lines 8744 process_lines = '\n'.join([self.get_process_info_lines(me) for me in \ 8745 matrix_elements]) 8746 replace_dict['process_lines'] = process_lines 8747 8748 nexternal, ninitial = matrix_elements[0].get_nexternal_ninitial() 8749 replace_dict['nexternal'] = nexternal 8750 8751 replace_dict['nsprocs'] = 2*len(matrix_elements) 8752 8753 # Generate dsig definition line 8754 dsig_def_line = "DOUBLE PRECISION " + \ 8755 ",".join(["DSIG%d" % (iproc + 1) for iproc in \ 8756 range(len(matrix_elements))]) 8757 replace_dict["dsig_def_line"] = dsig_def_line 8758 8759 # Generate dsig process lines 8760 call_dsig_proc_lines = [] 8761 for iproc in range(len(matrix_elements)): 8762 call_dsig_proc_lines.append(\ 8763 "IF(IPROC.EQ.%(num)d) DSIGPROC=DSIG%(num)d(P1,WGT,IMODE) ! %(proc)s" % \ 8764 {"num": iproc + 1, 8765 "proc": matrix_elements[iproc].get('processes')[0].base_string()}) 8766 replace_dict['call_dsig_proc_lines'] = "\n".join(call_dsig_proc_lines) 8767 8768 if writer: 8769 file = open(os.path.join(_file_path, \ 8770 'iolibs/template_files/super_auto_dsig_mw_group_v4.inc')).read() 8771 file = file % replace_dict 8772 # Write the file 8773 writer.writelines(file) 8774 else: 8775 return replace_dict
8776 8777 #=========================================================================== 8778 # write_mirrorprocs 8779 #===========================================================================
8780 - def write_mirrorprocs(self, writer, subproc_group):
8781 """Write the mirrorprocs.inc file determining which processes have 8782 IS mirror process in subprocess group mode.""" 8783 8784 lines = [] 8785 bool_dict = {True: '.true.', False: '.false.'} 8786 matrix_elements = subproc_group.get('matrix_elements') 8787 lines.append("DATA (MIRRORPROCS(I),I=1,%d)/%s/" % \ 8788 (len(matrix_elements), 8789 ",".join([bool_dict[me.get('has_mirror_process')] for \ 8790 me in matrix_elements]))) 8791 # Write the file 8792 writer.writelines(lines)
8793 8794 #=========================================================================== 8795 # write_configs_file 8796 #===========================================================================
8797 - def write_configs_file(self, writer, subproc_group, diagrams_for_config):
8798 """Write the configs.inc file with topology information for a 8799 subprocess group. Use the first subprocess with a diagram for each 8800 configuration.""" 8801 8802 matrix_elements = subproc_group.get('matrix_elements') 8803 model = matrix_elements[0].get('processes')[0].get('model') 8804 8805 diagrams = [] 8806 config_numbers = [] 8807 for iconfig, config in enumerate(diagrams_for_config): 8808 # Check if any diagrams correspond to this config 8809 if set(config) == set([0]): 8810 continue 8811 subproc_diags = [] 8812 for s,d in enumerate(config): 8813 if d: 8814 subproc_diags.append(matrix_elements[s].\ 8815 get('diagrams')[d-1]) 8816 else: 8817 subproc_diags.append(None) 8818 diagrams.append(subproc_diags) 8819 config_numbers.append(iconfig + 1) 8820 8821 # Extract number of external particles 8822 (nexternal, ninitial) = subproc_group.get_nexternal_ninitial() 8823 8824 return len(diagrams), \ 8825 self.write_configs_file_from_diagrams(writer, diagrams, 8826 config_numbers, 8827 nexternal, ninitial, 8828 matrix_elements[0],model)
8829 8830 #=========================================================================== 8831 # write_run_configs_file 8832 #===========================================================================
8833 - def write_run_config_file(self, writer):
8834 """Write the run_configs.inc file for MadEvent""" 8835 8836 path = os.path.join(_file_path,'iolibs','template_files','madweight_run_config.inc') 8837 text = open(path).read() % {'chanperjob':'2'} 8838 writer.write(text) 8839 return True
8840 8841 8842 #=========================================================================== 8843 # write_leshouche_file 8844 #===========================================================================
8845 - def write_leshouche_file(self, writer, subproc_group):
8846 """Write the leshouche.inc file for MG4""" 8847 8848 all_lines = [] 8849 8850 for iproc, matrix_element in \ 8851 enumerate(subproc_group.get('matrix_elements')): 8852 all_lines.extend(self.get_leshouche_lines(matrix_element, 8853 iproc)) 8854 8855 # Write the file 8856 writer.writelines(all_lines) 8857 8858 return True
8859