
Source Code for Module madgraph.interface.amcatnlo_run_interface

################################################################################
#
# Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
"""A user-friendly command line interface to access MadGraph5_aMC@NLO features.
   Uses the cmd package for command interpretation and tab completion.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function

import atexit
import glob
import logging
import math
import optparse
import os
import pydoc
import random
import re
import shutil
import subprocess
import sys
import traceback
import time
import signal
import tarfile
import copy
import datetime

import six
StringIO = six  # the six module exposes a StringIO attribute, so this alias works
from six.moves import range
from six.moves import zip

try:
    import cPickle as pickle
except ImportError:
    import pickle

try:
    import readline
    GNU_SPLITTING = ('GNU' in readline.__doc__)
except Exception:
    GNU_SPLITTING = True

root_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
root_path = os.path.split(root_path)[0]
sys.path.insert(0, os.path.join(root_path, 'bin'))

# useful shortcut
pjoin = os.path.join
# Special logger for the Cmd Interface
logger = logging.getLogger('madgraph.stdout')        # -> stdout
logger_stderr = logging.getLogger('madgraph.stderr') # -> stderr

try:
    import madgraph
except ImportError:
    # running from inside a process directory: use the 'internal' copies
    aMCatNLO = True
    import internal.extended_cmd as cmd
    import internal.common_run_interface as common_run
    import internal.banner as banner_mod
    import internal.misc as misc
    from internal import InvalidCmd, MadGraph5Error
    import internal.files as files
    import internal.cluster as cluster
    import internal.save_load_object as save_load_object
    import internal.gen_crossxhtml as gen_crossxhtml
    import internal.sum_html as sum_html
    import internal.shower_card as shower_card
    import internal.FO_analyse_card as analyse_card
    import internal.lhe_parser as lhe_parser
else:
    # import from the madgraph directory
    aMCatNLO = False
    import madgraph.interface.extended_cmd as cmd
    import madgraph.interface.common_run_interface as common_run
    import madgraph.iolibs.files as files
    import madgraph.iolibs.save_load_object as save_load_object
    import madgraph.madevent.gen_crossxhtml as gen_crossxhtml
    import madgraph.madevent.sum_html as sum_html
    import madgraph.various.banner as banner_mod
    import madgraph.various.cluster as cluster
    import madgraph.various.misc as misc
    import madgraph.various.shower_card as shower_card
    import madgraph.various.FO_analyse_card as analyse_card
    import madgraph.various.lhe_parser as lhe_parser
    from madgraph import InvalidCmd, aMCatNLOError, MadGraph5Error, MG5DIR

class aMCatNLOError(Exception):
    pass

def compile_dir(*arguments):
    """compile the directory p_dir
    arguments is the tuple (me_dir, p_dir, mode, options, tests, exe, run_mode)
    this function must be a module-level function (not a class method) so that
    it can be used for the compilation on multiple cores"""

    if len(arguments) == 1:
        (me_dir, p_dir, mode, options, tests, exe, run_mode) = arguments[0]
    elif len(arguments) == 7:
        (me_dir, p_dir, mode, options, tests, exe, run_mode) = arguments
    else:
        raise aMCatNLOError('Wrong number of arguments')
    logger.info(' Compiling %s...' % p_dir)

    this_dir = pjoin(me_dir, 'SubProcesses', p_dir)

    try:
        # compile everything
        # compile and run tests
        for test in tests:
            # skip check_poles for LOonly dirs
            if test == 'check_poles' and os.path.exists(pjoin(this_dir, 'parton_lum_0.f')):
                continue
            if test == 'test_ME' or test == 'test_MC':
                test_exe = 'test_soft_col_limits'
            else:
                test_exe = test
            misc.compile([test_exe], cwd=this_dir, job_specs=False)
            input = pjoin(me_dir, '%s_input.txt' % test)
            # this can be improved/better written to handle the output
            misc.call(['./%s' % test_exe], cwd=this_dir,
                      stdin=open(input),
                      stdout=open(pjoin(this_dir, '%s.log' % test), 'w'),
                      close_fds=True)
            if test == 'check_poles' and os.path.exists(pjoin(this_dir, 'MadLoop5_resources')):
                tf = tarfile.open(pjoin(this_dir, 'MadLoop5_resources.tar.gz'), 'w:gz',
                                  dereference=True)
                tf.add(pjoin(this_dir, 'MadLoop5_resources'), arcname='MadLoop5_resources')
                tf.close()

        if not options['reweightonly']:
            misc.compile(['gensym'], cwd=this_dir, job_specs=False)
            misc.call(['./gensym', mode], cwd=this_dir,
                      stdout=open(pjoin(this_dir, 'gensym.log'), 'w'),
                      close_fds=True)
            # compile madevent_mintMC/mintFO
            misc.compile([exe], cwd=this_dir, job_specs=False)
        if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
            misc.compile(['reweight_xsec_events'], cwd=this_dir, job_specs=False)

        logger.info(' %s done.' % p_dir)
        return 0
    except MadGraph5Error as msg:
        return msg

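# Illustration (a sketch, not part of the original module): compile_dir takes
# its arguments as a single tuple precisely so that it can be handed to a
# process pool, with each worker compiling one P* subprocess directory.  The
# driver below is hypothetical (directory names, options contents); it is
# never called by the interface itself.
def _sketch_parallel_compile(me_dir, p_dirs, mode, options, tests, exe, run_mode, nb_core=4):
    import multiprocessing
    # one 7-tuple per job, matching the len(arguments) == 1 branch above
    jobs = [(me_dir, p_dir, mode, options, tests, exe, run_mode) for p_dir in p_dirs]
    pool = multiprocessing.Pool(processes=nb_core)
    try:
        results = pool.map(compile_dir, jobs)
    finally:
        pool.close()
        pool.join()
    # compile_dir returns 0 on success, or the error message on failure
    errors = [r for r in results if r != 0]
    if errors:
        raise aMCatNLOError('compilation failed: %s' % errors[0])
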
def check_compiler(options, block=False):
    """check that the current fortran compiler is gfortran 4.6 or later.
    If block, stop the execution, otherwise just print a warning"""

    msg = 'In order to be able to run MadGraph5_aMC@NLO at NLO, you need to have ' + \
          'gfortran 4.6 or later installed.\n%s has been detected.\n' + \
          'Note that you can still run MadEvent (at LO) without any problem!'
    # first check that gfortran is installed
    if options['fortran_compiler']:
        compiler = options['fortran_compiler']
    elif misc.which('gfortran'):
        compiler = 'gfortran'
    else:
        compiler = ''

    if 'gfortran' not in compiler:
        if block:
            raise aMCatNLOError(msg % compiler)
        else:
            logger.warning(msg % compiler)
    else:
        curr_version = misc.get_gfortran_version(compiler)
        curr_version = curr_version.split('.')
        if len(curr_version) == 1:
            curr_version.append('0')

        if int(curr_version[0]) < 5:
            if int(curr_version[0]) == 4 and int(curr_version[1]) > 5:
                return
            version_str = '.'.join(str(v) for v in curr_version)
            if block:
                raise aMCatNLOError(msg % (compiler + ' ' + version_str))
            else:
                logger.warning(msg % (compiler + ' ' + version_str))

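# Illustration (a sketch, not part of the original module): the same
# "gfortran >= 4.6" test as above, but self-contained, assuming only that
# `gfortran --version` is available on the PATH.  Never called by the
# interface itself.
def _sketch_gfortran_is_recent(minimum=(4, 6)):
    import re
    import subprocess
    out = subprocess.check_output(['gfortran', '--version']).decode()
    match = re.search(r'(\d+)\.(\d+)', out)          # first x.y token
    if not match:
        return False
    version = tuple(int(x) for x in match.groups())  # e.g. (11, 4)
    return version >= minimum                        # tuple comparison
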
#===============================================================================
# CmdExtended
#===============================================================================
class CmdExtended(common_run.CommonRunCmd):
    """Particularisation of the cmd command for aMCatNLO"""

    # suggested list of commands
    next_possibility = {
        'start': [],
    }

    debug_output = 'ME5_debug'
    error_debug = 'Please report this bug on https://bugs.launchpad.net/mg5amcnlo\n'
    error_debug += 'More information is found in \'%(debug)s\'.\n'
    error_debug += 'Please attach this file to your report.'

    config_debug = 'If you need help with this issue, please, contact us on https://answers.launchpad.net/mg5amcnlo\n'

    keyboard_stop_msg = """stopping all operations
    in order to quit MadGraph5_aMC@NLO please enter exit"""

    # Define the Error
    InvalidCmd = InvalidCmd
    ConfigurationError = aMCatNLOError

    def __init__(self, me_dir, options, *arg, **opt):
        """Init history and line continuation"""

        # Tag allowing/forbidding questions
        self.force = False

        # If possible, build an info line with the current version number
        # and date, from the VERSION text file
        info = misc.get_pkg_info()
        info_line = ""
        if info and 'version' in info and 'date' in info:
            len_version = len(info['version'])
            len_date = len(info['date'])
            if len_version + len_date < 30:
                info_line = "#*         VERSION %s %s %s         *\n" % \
                            (info['version'],
                             (30 - len_version - len_date) * ' ',
                             info['date'])
        else:
            version = open(pjoin(root_path, 'MGMEVersion.txt')).readline().strip()
            info_line = "#*         VERSION %s %s                *\n" % \
                        (version, (24 - len(version)) * ' ')

        # Create a header for the history file.
        # Remember to fill in the time at writeout time!
        self.history_header = (
            '#************************************************************\n'
            '#*                    MadGraph5_aMC@NLO                     *\n'
            '#*                                                          *\n'
            '#*                *                       *                 *\n'
            '#*                  *        * *        *                   *\n'
            '#*                    * * * * 5 * * * *                     *\n'
            '#*                  *        * *        *                   *\n'
            '#*                *                       *                 *\n'
            '#*                                                          *\n'
            '#*                                                          *\n'
            + info_line +
            '#*                                                          *\n'
            '#*    The MadGraph5_aMC@NLO Development Team - Find us at   *\n'
            '#*    https://server06.fynu.ucl.ac.be/projects/madgraph     *\n'
            '#*                            and                           *\n'
            '#*                  http://amcatnlo.cern.ch                 *\n'
            '#*                                                          *\n'
            '#************************************************************\n'
            '#*                                                          *\n'
            '#*               Command File for aMCatNLO                  *\n'
            '#*                                                          *\n'
            '#*     run as ./bin/aMCatNLO.py filename                    *\n'
            '#*                                                          *\n'
            '#************************************************************\n')

        if info_line:
            info_line = info_line[1:]

        logger.info(
            "************************************************************\n"
            "*                                                          *\n"
            "*           W E L C O M E  to  M A D G R A P H 5           *\n"
            "*                      a M C @ N L O                       *\n"
            "*                                                          *\n"
            "*                 *                       *                *\n"
            "*                   *        * *        *                  *\n"
            "*                     * * * * 5 * * * *                    *\n"
            "*                   *        * *        *                  *\n"
            "*                 *                       *                *\n"
            "*                                                          *\n"
            + info_line +
            "*                                                          *\n"
            "*    The MadGraph5_aMC@NLO Development Team - Find us at   *\n"
            "*                 http://amcatnlo.cern.ch                  *\n"
            "*                                                          *\n"
            "*               Type 'help' for in-line help.              *\n"
            "*                                                          *\n"
            "************************************************************")
        super(CmdExtended, self).__init__(me_dir, options, *arg, **opt)

    def get_history_header(self):
        """return the history header"""
        return self.history_header % misc.get_time_info()

    def stop_on_keyboard_stop(self):
        """action to perform to close nicely on a keyboard interrupt"""
        try:
            if hasattr(self, 'cluster'):
                logger.info('rm jobs on queue')
                self.cluster.remove()
            if hasattr(self, 'results'):
                self.update_status('Stop by the user', level=None, makehtml=True, error=True)
                self.add_error_log_in_html(KeyboardInterrupt)
        except Exception:
            pass

    def postcmd(self, stop, line):
        """ Update the status of the run when finishing an interactive command """

        # relaxing the tag forbidding questions
        self.force = False

        if not self.use_rawinput:
            return stop

        arg = line.split()
        if len(arg) == 0:
            return stop
        elif str(arg[0]) in ['exit', 'quit', 'EOF']:
            return stop

        try:
            self.update_status('Command \'%s\' done.<br> Waiting for instruction.' % arg[0],
                               level=None, error=True)
        except Exception:
            misc.sprint('self.update_status fails', log=logger)
            pass

    def nice_user_error(self, error, line):
        """If a ME run is currently running add a link in the html output"""

        self.add_error_log_in_html()
        cmd.Cmd.nice_user_error(self, error, line)

    def nice_config_error(self, error, line):
        """If a ME run is currently running add a link in the html output"""

        self.add_error_log_in_html()
        cmd.Cmd.nice_config_error(self, error, line)

    def nice_error_handling(self, error, line):
        """If a ME run is currently running add a link in the html output"""

        self.add_error_log_in_html()
        cmd.Cmd.nice_error_handling(self, error, line)

#===============================================================================
# HelpToCmd
#===============================================================================
class HelpToCmd(object):
    """ The series of help routines for the aMCatNLOCmd"""

    def help_launch(self):
        """help for launch command"""
        _launch_parser.print_help()

    def help_banner_run(self):
        logger.info("syntax: banner_run Path|RUN [--run_options]")
        logger.info("-- Reproduce a run following a given banner")
        logger.info("   One of the following arguments is required:")
        logger.info("   Path should be the path of a valid banner.")
        logger.info("   RUN should be the name of a run of the current directory")
        self.run_options_help([('-f', 'answer all questions by default'),
                               ('--name=X', 'Define the name associated with the new run')])

    def help_compile(self):
        """help for compile command"""
        _compile_parser.print_help()

    def help_generate_events(self):
        """help for generate_events command;
        just calls help_launch"""
        _generate_events_parser.print_help()

    def help_calculate_xsect(self):
        """help for calculate_xsect command"""
        _calculate_xsect_parser.print_help()

    def help_shower(self):
        """help for shower command"""
        _shower_parser.print_help()

    def help_open(self):
        logger.info("syntax: open FILE  ")
        logger.info("-- open a file with the appropriate editor.")
        logger.info('   If FILE belongs to index.html, param_card.dat, run_card.dat')
        logger.info('   the path to the last created/used directory is used')

    def run_options_help(self, data):
        if data:
            logger.info('-- local options:')
            for name, info in data:
                logger.info('      %s : %s' % (name, info))

        logger.info("-- session options:")
        logger.info("      Note that those options will be kept for the current session")
        logger.info("      --cluster : Submit to the cluster. Current cluster: %s" % self.options['cluster_type'])
        logger.info("      --multicore : Run in multi-core configuration")
        logger.info("      --nb_core=X : limit the number of cores to use to X.")

#===============================================================================
# CheckValidForCmd
#===============================================================================
class CheckValidForCmd(object):
    """ The series of check routines for the aMCatNLOCmd"""

    def check_shower(self, args, options):
        """Check the validity of the line. args[0] is the run_directory"""

        if options['force']:
            self.force = True

        if len(args) == 0:
            self.help_shower()
            raise self.InvalidCmd('Invalid syntax, please specify the run name')
        if not os.path.isdir(pjoin(self.me_dir, 'Events', args[0])):
            raise self.InvalidCmd('Directory %s does not exist' %
                                  pjoin(self.me_dir, 'Events', args[0]))

        self.set_run_name(args[0], level='shower')
        args[0] = pjoin(self.me_dir, 'Events', args[0])

    def check_plot(self, args):
        """Check the arguments for the plot command
        plot run_name modes"""

        madir = self.options['madanalysis_path']
        td = self.options['td_path']

        if not madir or not td:
            logger.info('Retrying to read the configuration file to find madanalysis/td')
            self.set_configuration()

        madir = self.options['madanalysis_path']
        td = self.options['td_path']

        if not madir:
            error_msg = 'No valid MadAnalysis path set. '
            error_msg += 'Please use the set command to define the path and retry. '
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)
        if not td:
            error_msg = 'No valid path to your topdrawer directory set. '
            error_msg += 'Please use the set command to define the path and retry. '
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        if len(args) == 0:
            if not hasattr(self, 'run_name') or not self.run_name:
                self.help_plot()
                raise self.InvalidCmd('No run name defined. Please add this information.')
            args.append('all')
            return

        if args[0] not in self._plot_mode:
            self.set_run_name(args[0], level='plot')
            del args[0]
            if len(args) == 0:
                args.append('all')
        elif not self.run_name:
            self.help_plot()
            raise self.InvalidCmd('No run name defined. Please add this information.')

        for arg in args:
            if arg not in self._plot_mode and arg != self.run_name:
                self.help_plot()
                raise self.InvalidCmd('unknown option %s' % arg)

    def check_pgs(self, arg):
        """Check the arguments for the pgs command
        syntax: pgs [NAME]
        Note that other options have already been removed at this point
        """

        # check the pythia-pgs path
        if not self.options['pythia-pgs_path']:
            logger.info('Retrying to read the configuration file to find the pythia-pgs path')
            self.set_configuration()

        if not self.options['pythia-pgs_path'] or not \
           os.path.exists(pjoin(self.options['pythia-pgs_path'], 'src')):
            error_msg = 'No valid pythia-pgs path set. '
            error_msg += 'Please use the set command to define the path and retry. '
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        tag = [a for a in arg if a.startswith('--tag=')]
        if tag:
            arg.remove(tag[0])
            tag = tag[0][6:]

        if len(arg) == 0 and not self.run_name:
            if self.results.lastrun:
                arg.insert(0, self.results.lastrun)
            else:
                raise self.InvalidCmd('No run name defined. Please add this information.')

        if len(arg) == 1 and self.run_name == arg[0]:
            arg.pop(0)

        if not len(arg) and \
           not os.path.exists(pjoin(self.me_dir, 'Events', 'pythia_events.hep')):
            self.help_pgs()
            raise self.InvalidCmd('''No file pythia_events.hep currently available. Please specify a valid run_name''')

        lock = None
        if len(arg) == 1:
            prev_tag = self.set_run_name(arg[0], tag, 'pgs')
            filenames = misc.glob('events_*.hep.gz', pjoin(self.me_dir, 'Events', self.run_name))

            if not filenames:
                raise self.InvalidCmd('No event file corresponding to %s run with tag %s. ' % (self.run_name, prev_tag))
            else:
                input_file = filenames[0]
                output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep')
                lock = cluster.asyncrone_launch('gunzip', stdout=open(output_file, 'w'),
                                                argument=['-c', input_file],
                                                close_fds=True)
        else:
            if tag:
                self.run_card['run_tag'] = tag
            self.set_run_name(self.run_name, tag, 'pgs')

        return lock

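    # Illustration (a sketch, not part of the original module): check_pgs and
    # check_delphes both strip an optional '--tag=X' token out of the argument
    # list, mutating it in place, before treating what remains as a run name.
    # The same idiom in isolation; never called by the interface itself.
    @staticmethod
    def _sketch_extract_tag(arg):
        """return X from a '--tag=X' token in arg (a list of tokens), or None"""
        tags = [a for a in arg if a.startswith('--tag=')]
        if tags:
            arg.remove(tags[0])                # mutate in place, as the checks do
            return tags[0][len('--tag='):]
        return None
        # e.g. _sketch_extract_tag(['run_01', '--tag=nominal']) -> 'nominal',
        # leaving arg == ['run_01']
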
    def check_delphes(self, arg):
        """Check the arguments for the delphes command
        syntax: delphes [NAME]
        Note that other options have already been removed at this point
        """

        # check the delphes path
        if not self.options['delphes_path']:
            logger.info('Retrying to read the configuration file to find the delphes path')
            self.set_configuration()

        if not self.options['delphes_path']:
            error_msg = 'No valid delphes path set. '
            error_msg += 'Please use the set command to define the path and retry. '
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        tag = [a for a in arg if a.startswith('--tag=')]
        if tag:
            arg.remove(tag[0])
            tag = tag[0][6:]

        if len(arg) == 0 and not self.run_name:
            if self.results.lastrun:
                arg.insert(0, self.results.lastrun)
            else:
                raise self.InvalidCmd('No run name defined. Please add this information.')

        if len(arg) == 1 and self.run_name == arg[0]:
            arg.pop(0)

        if not len(arg) and \
           not os.path.exists(pjoin(self.me_dir, 'Events', 'pythia_events.hep')):
            self.help_pgs()
            raise self.InvalidCmd('''No file pythia_events.hep currently available. Please specify a valid run_name''')

        if len(arg) == 1:
            prev_tag = self.set_run_name(arg[0], tag, 'delphes')
            filenames = misc.glob('events_*.hep.gz', pjoin(self.me_dir, 'Events'))

            if not filenames:
                raise self.InvalidCmd('No event file corresponding to run %s with tag %s (expected %s).' %
                                      (self.run_name, prev_tag,
                                       pjoin(self.me_dir, 'Events', self.run_name,
                                             '%s_pythia_events.hep.gz' % prev_tag)))
            else:
                input_file = filenames[0]
                output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep')
                lock = cluster.asyncrone_launch('gunzip', stdout=open(output_file, 'w'),
                                                argument=['-c', input_file],
                                                close_fds=True)
        else:
            if tag:
                self.run_card['run_tag'] = tag
            self.set_run_name(self.run_name, tag, 'delphes')

    def check_calculate_xsect(self, args, options):
        """check the validity of the line. args is ORDER,
        ORDER being LO or NLO. If no mode is passed, NLO is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('NLO')
            return

        if len(args) > 1:
            self.help_calculate_xsect()
            raise self.InvalidCmd('Invalid Syntax: too many arguments')

        elif len(args) == 1:
            if not args[0] in ['NLO', 'LO']:
                raise self.InvalidCmd('%s is not a valid mode, please use "LO" or "NLO"' % args[0])
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd('options -m (--multicore) and -c (--cluster)' +
                                  ' are not compatible. Please choose one.')

    def check_generate_events(self, args, options):
        """check the validity of the line. args is ORDER,
        ORDER being LO or NLO. If no mode is passed, NLO is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if not args:
            args.append('NLO')
            return

        if len(args) > 1:
            self.help_generate_events()
            raise self.InvalidCmd('Invalid Syntax: too many arguments')

        elif len(args) == 1:
            if not args[0] in ['NLO', 'LO']:
                raise self.InvalidCmd('%s is not a valid mode, please use "LO" or "NLO"' % args[0])
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd('options -m (--multicore) and -c (--cluster)' +
                                  ' are not compatible. Please choose one.')

    def check_banner_run(self, args):
        """check the validity of the line"""

        if len(args) == 0:
            self.help_banner_run()
            raise self.InvalidCmd('banner_run requires at least one argument.')

        tag = [a[6:] for a in args if a.startswith('--tag=')]

        if os.path.exists(args[0]):
            type = 'banner'
            format = self.detect_card_type(args[0])
            if format != 'banner':
                raise self.InvalidCmd('The file is not a valid banner.')
        elif tag:
            args[0] = pjoin(self.me_dir, 'Events', args[0], '%s_%s_banner.txt' %
                            (args[0], tag))
            if not os.path.exists(args[0]):
                raise self.InvalidCmd('No banner associated with this name and tag.')
        else:
            name = args[0]
            type = 'run'
            banners = misc.glob('*_banner.txt', pjoin(self.me_dir, 'Events', args[0]))
            if not banners:
                raise self.InvalidCmd('No banner associated with this name.')
            elif len(banners) == 1:
                args[0] = banners[0]
            else:
                # list the tags and propose them to the user
                tags = [os.path.basename(p)[len(args[0]) + 1:-11] for p in banners]
                tag = self.ask('which tag do you want to use?', tags[0], tags)
                args[0] = pjoin(self.me_dir, 'Events', args[0], '%s_%s_banner.txt' %
                                (args[0], tag))

        run_name = [arg[7:] for arg in args if arg.startswith('--name=')]
        if run_name:
            try:
                self.exec_cmd('remove %s all banner -f' % run_name[0])
            except Exception:
                pass
            self.set_run_name(args[0], tag=None, level='parton', reload_card=True)
        elif type == 'banner':
            self.set_run_name(self.find_available_run_name(self.me_dir))
        elif type == 'run':
            if not self.results[name].is_empty():
                run_name = self.find_available_run_name(self.me_dir)
                logger.info('Run %s is not empty so will use run_name: %s' %
                            (name, run_name))
                self.set_run_name(run_name)
            else:
                try:
                    self.exec_cmd('remove %s all banner -f' % name)
                except Exception:
                    pass
                self.set_run_name(name)

    def check_launch(self, args, options):
        """check the validity of the line. args is MODE,
        MODE being LO, NLO, aMC@NLO or aMC@LO. If no mode is passed, auto is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('auto')
            return

        if len(args) > 1:
            self.help_launch()
            raise self.InvalidCmd('Invalid Syntax: too many arguments')

        elif len(args) == 1:
            if not args[0] in ['LO', 'NLO', 'aMC@NLO', 'aMC@LO', 'auto']:
                raise self.InvalidCmd('%s is not a valid mode, please use "LO", "NLO", "aMC@NLO" or "aMC@LO"' % args[0])
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd('options -m (--multicore) and -c (--cluster)' +
                                  ' are not compatible. Please choose one.')
        if mode == 'NLO' and options['reweightonly']:
            raise self.InvalidCmd('option -r (--reweightonly) needs mode "aMC@NLO" or "aMC@LO"')

    def check_compile(self, args, options):
        """check the validity of the line. args is MODE,
        MODE being FO or MC. If no mode is passed, MC is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('MC')
            return

        if len(args) > 1:
            self.help_compile()
            raise self.InvalidCmd('Invalid Syntax: too many arguments')

        elif len(args) == 1:
            if not args[0] in ['MC', 'FO']:
                raise self.InvalidCmd('%s is not a valid mode, please use "FO" or "MC"' % args[0])
            mode = args[0]
        # check for incompatible options/modes

#===============================================================================
# CompleteForCmd
#===============================================================================
class CompleteForCmd(CheckValidForCmd):
    """ The series of completion routines for the aMCatNLOCmd"""

    def complete_launch(self, text, line, begidx, endidx):
        """auto-completion for launch command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return mode
            return self.list_completion(text, ['LO', 'NLO', 'aMC@NLO', 'aMC@LO'], line)
        elif len(args) == 2 and line[begidx - 1] == '@':
            return self.list_completion(text, ['LO', 'NLO'], line)
        else:
            opts = []
            for opt in _launch_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

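    # Illustration (a sketch, not part of the original module): the
    # (text, line, begidx, endidx) signature above is the standard cmd/readline
    # completion protocol -- 'line' is the full input buffer, 'text' the token
    # being completed, and begidx/endidx its span within 'line'.  The same
    # first-argument/option split in isolation, with a hypothetical option
    # list; never called by the interface itself.
    @staticmethod
    def _sketch_complete(text, line, begidx, endidx):
        modes = ['LO', 'NLO', 'aMC@NLO', 'aMC@LO']
        options = ['-f', '--multicore', '--cluster']       # hypothetical
        args = line[:begidx].split()
        if len(args) == 1:                                 # completing the mode
            return [m for m in modes if m.startswith(text)]
        return [o for o in options if o.startswith(text)]  # completing an option
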
    def complete_banner_run(self, text, line, begidx, endidx, formatting=True):
        "Complete the banner_run command"
        try:
            args = self.split_arg(line[0:begidx], error=False)

            if args[-1].endswith(os.path.sep):
                return self.path_completion(text,
                                            os.path.join('.', *[a for a in args
                                                                if a.endswith(os.path.sep)]))

            if len(args) > 1:
                # only options are possible
                tags = misc.glob('%s_*_banner.txt' % args[1], pjoin(self.me_dir, 'Events', args[1]))
                tags = ['%s' % os.path.basename(t)[len(args[1]) + 1:-11] for t in tags]

                if args[-1] != '--tag=':
                    tags = ['--tag=%s' % t for t in tags]
                else:
                    return self.list_completion(text, tags)
                return self.list_completion(text, tags + ['--name=', '-f'], line)

            # First argument
            possibilities = {}

            comp = self.path_completion(text, os.path.join('.', *[a for a in args
                                                                  if a.endswith(os.path.sep)]))
            if os.path.sep in line:
                return comp
            else:
                possibilities['Path from ./'] = comp

            run_list = misc.glob(pjoin('*', '*_banner.txt'), pjoin(self.me_dir, 'Events'))
            run_list = [n.rsplit('/', 2)[1] for n in run_list]
            possibilities['RUN Name'] = self.list_completion(text, run_list)

            return self.deal_multiple_categories(possibilities, formatting)

        except Exception as error:
            print(error)

    def complete_compile(self, text, line, begidx, endidx):
        """auto-completion for compile command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return mode
            return self.list_completion(text, ['FO', 'MC'], line)
        else:
            opts = []
            for opt in _compile_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_calculate_xsect(self, text, line, begidx, endidx):
        """auto-completion for calculate_xsect command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return mode
            return self.list_completion(text, ['LO', 'NLO'], line)
        else:
            opts = []
            for opt in _calculate_xsect_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_generate_events(self, text, line, begidx, endidx):
        """auto-completion for generate_events command;
        just calls the completion for launch"""
        return self.complete_launch(text, line, begidx, endidx)

    def complete_shower(self, text, line, begidx, endidx):
        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return valid run_name
            data = misc.glob(pjoin('*', 'events.lhe.gz'), pjoin(self.me_dir, 'Events'))
            data = [n.rsplit('/', 2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1

    def complete_plot(self, text, line, begidx, endidx):
        """ Complete the plot command """

        args = self.split_arg(line[0:begidx], error=False)

        if len(args) == 1:
            # return valid run_name
            data = misc.glob(pjoin('*', 'events.lhe*'), pjoin(self.me_dir, 'Events'))
            data = [n.rsplit('/', 2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1

        if len(args) > 1:
            return self.list_completion(text, self._plot_mode)

    def complete_pgs(self, text, line, begidx, endidx):
        "Complete the pgs command"
        args = self.split_arg(line[0:begidx], error=False)
        if len(args) == 1:
            # return valid run_name
            data = misc.glob(pjoin('*', 'events_*.hep.gz'),
                             pjoin(self.me_dir, 'Events'))
            data = [n.rsplit('/', 2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1
            else:
                tmp2 = self.list_completion(text, self._run_options + ['-f',
                                            '--tag=', '--no_default'], line)
                return tmp1 + tmp2
        else:
            return self.list_completion(text, self._run_options + ['-f',
                                        '--tag=', '--no_default'], line)

    complete_delphes = complete_pgs

class aMCatNLOAlreadyRunning(InvalidCmd):
    pass

class AskRunNLO(cmd.ControlSwitch):

    to_control = [('order', 'Type of perturbative computation'),
                  ('fixed_order', 'No MC@[N]LO matching / event generation'),
                  ('shower', 'Shower the generated events'),
                  ('madspin', 'Decay onshell particles'),
                  ('reweight', 'Add weights to events for new hypotheses'),
                  ('madanalysis', 'Run MadAnalysis5 on the events generated')]

    quit_on = cmd.ControlSwitch.quit_on + ['onlyshower']

    def __init__(self, question, line_args=[], mode=None, force=False,
                 *args, **opt):

        self.me_dir = opt['mother_interface'].me_dir
        self.check_available_module(opt['mother_interface'].options)
        self.last_mode = opt['mother_interface'].last_mode
        self.proc_characteristics = opt['mother_interface'].proc_characteristics
        self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat'),
                                           consistency='warning')

        hide_line = []
        if 'QED' in self.proc_characteristics['splitting_types']:
            hide_line = ['madspin', 'shower', 'reweight', 'madanalysis']

        super(AskRunNLO, self).__init__(self.to_control, opt['mother_interface'],
                                        *args, hide_line=hide_line, force=force,
                                        **opt)

    @property
    def answer(self):

        out = super(AskRunNLO, self).answer
        if out['shower'] == 'HERWIG7':
            out['shower'] = 'HERWIGPP'

        if out['shower'] not in self.get_allowed('shower') or out['shower'] == 'OFF':
            out['runshower'] = False
        else:
            out['runshower'] = True
        return out

    def default(self, *args, **opts):
        self.nb_fo_warning = 0
        super(AskRunNLO, self).default(*args, **opts)

    def check_available_module(self, options):

        self.available_module = set()
        if options['madanalysis5_path']:
            self.available_module.add('MA5')
        if not aMCatNLO or ('mg5_path' in options and options['mg5_path']):
            self.available_module.add('MadSpin')
            if misc.has_f2py() or options['f2py_compiler']:
                self.available_module.add('reweight')
        if options['pythia8_path']:
            self.available_module.add('PY8')
        if options['hwpp_path'] and options['thepeg_path'] and options['hepmc_path']:
            self.available_module.add('HW7')

        MCatNLO_libdir = pjoin(self.me_dir, 'MCatNLO', 'lib')
        if os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libstdhep.a'))):
            self.available_module.add('StdHEP')

    #
    # shortcuts
    #
    def ans_lo(self, value):
        """ function called if the user types lo=value, or lo (then value is None)"""

        if value is None:
            self.switch['order'] = 'LO'
            self.switch['fixed_order'] = 'ON'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: lo=%s' % value)

    def ans_nlo(self, value):
        if value is None:
            self.switch['order'] = 'NLO'
            self.switch['fixed_order'] = 'ON'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: nlo=%s' % value)

    def ans_amc__at__nlo(self, value):
        if value is None:
            self.switch['order'] = 'NLO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'ON')
        else:
            logger.warning('Invalid command: aMC@NLO=%s' % value)

    def ans_amc__at__lo(self, value):
        if value is None:
            self.switch['order'] = 'LO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'ON')
        else:
            logger.warning('Invalid command: aMC@LO=%s' % value)

    def ans_noshower(self, value):
        if value is None:
            self.switch['order'] = 'NLO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: noshower=%s' % value)

    def ans_onlyshower(self, value):
        if value is None:
            self.switch['mode'] = 'onlyshower'
            self.switch['madspin'] = 'OFF'
            self.switch['reweight'] = 'OFF'
        else:
            logger.warning('Invalid command: onlyshower=%s' % value)

    def ans_noshowerlo(self, value):
        if value is None:
            self.switch['order'] = 'LO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: noshowerlo=%s' % value)

    def ans_madanalysis5(self, value):
        """ shortcut madanalysis5 -> madanalysis """

        if value is None:
            return self.onecmd('madanalysis')
        else:
            self.set_switch('madanalysis', value)

    #
    # ORDER
    #
    def get_allowed_order(self):
        return ["LO", "NLO"]

    def set_default_order(self):

        if self.last_mode in ['LO', 'aMC@LO', 'noshowerLO']:
            self.switch['order'] = 'LO'
        else:
            self.switch['order'] = 'NLO'

    def set_switch_off_order(self):
        return

    #
    # Fixed order
    #
    def get_allowed_fixed_order(self):
        """ """

        if self.proc_characteristics['ninitial'] == 1 or \
           'QED' in self.proc_characteristics['splitting_types']:
            return ['ON']
        else:
            return ['ON', 'OFF']

    def set_default_fixed_order(self):

        if self.last_mode in ['LO', 'NLO']:
            self.switch['fixed_order'] = 'ON'
        elif self.proc_characteristics['ninitial'] == 1:
            self.switch['fixed_order'] = 'ON'
        elif 'QED' in self.proc_characteristics['splitting_types']:
            self.switch['fixed_order'] = 'ON'
        else:
            self.switch['fixed_order'] = 'OFF'

    def color_for_fixed_order(self, switch_value):

        if switch_value in ['OFF']:
            return self.green % switch_value
        else:
            return self.red % switch_value

    def print_options_fixed_order(self):

        if 'QED' in self.proc_characteristics['splitting_types']:
            return "No NLO+PS available for EW correction"
        else:
            return self.print_options('fixed_order', keep_default=True)

    def color_for_shower(self, switch_value):

        if switch_value in ['ON']:
            return self.green % switch_value
        elif switch_value in self.get_allowed('shower'):
            return self.green % switch_value
        else:
            return self.red % switch_value

    def consistency_QED(self, key, value, switch):
        """ temporary way to forbid event generation due to lack of validation"""

        if 'QED' in self.proc_characteristics['splitting_types']:
            out = {}
            to_check = {'fixed_order': ['ON'],
                        'shower': ['OFF'],
                        'madanalysis': ['OFF'],
                        'madspin': ['OFF', 'onshell', 'none'],
                        'reweight': ['OFF']}
            for key, allowed in to_check.items():
                if switch[key] not in allowed:
                    out[key] = allowed[0]
            if not self.nb_fo_warning:
                logger.warning("NLO+PS mode is not allowed for processes including electroweak corrections")
                self.nb_fo_warning = 1
        else:
            return self.check_consistency_with_all(key, value)
        return out

    # apply to all switches related to the group
    consistency_fixed_order = lambda self, *args, **opts: self.consistency_QED('fixed_order', *args, **opts)
    consistency_shower = lambda self, *args, **opts: self.consistency_QED('shower', *args, **opts)
    consistency_madanalysis = lambda self, *args, **opts: self.consistency_QED('madanalysis', *args, **opts)
    consistency_madspin = lambda self, *args, **opts: self.consistency_QED('madspin', *args, **opts)
    consistency_reweight = lambda self, *args, **opts: self.consistency_QED('reweight', *args, **opts)

    def consistency_fixed_order_shower(self, vfix, vshower):
        """ consistency_XX_YY(val_XX, val_YY)
            -> XX is the new key set by the user to a new value val_XX
            -> YY is another key set by the user.
            -> the return value should be None or the replacement value for YY
        """

        if vfix == 'ON' and vshower != 'OFF':
            return 'OFF'
        return None

    consistency_fixed_order_madspin = consistency_fixed_order_shower
    consistency_fixed_order_reweight = consistency_fixed_order_shower

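    # Illustration (a sketch, not part of the original module): the
    # consistency_XX_YY hooks above follow the convention documented in the
    # docstring -- given the new value of XX and the current value of YY,
    # return None to keep YY or a replacement value for it.  A minimal driver
    # in that spirit, assuming a plain dict of switches; the real dispatch
    # lives in cmd.ControlSwitch and may differ.
    @staticmethod
    def _sketch_apply_consistency(obj, switch, changed_key, new_value):
        switch[changed_key] = new_value
        for other_key in [k for k in switch if k != changed_key]:
            hook = getattr(obj, 'consistency_%s_%s' % (changed_key, other_key), None)
            if hook is None:
                continue
            replacement = hook(new_value, switch[other_key])
            if replacement is not None:
                switch[other_key] = replacement   # e.g. shower forced to 'OFF'
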
    def consistency_fixed_order_madanalysis(self, vfix, vma5):

        if vfix == 'ON' and vma5 == 'ON':
            return 'OFF'
        return None

    def consistency_shower_fixed_order(self, vshower, vfix):
        """ consistency_XX_YY(val_XX, val_YY)
            -> XX is the new key set by the user to a new value val_XX
            -> YY is another key set by the user.
            -> the return value should be None or the replacement value for YY
        """

        if vshower != 'OFF' and vfix == 'ON':
            return 'OFF'
        return None

    consistency_madspin_fixed_order = consistency_shower_fixed_order
    consistency_reweight_fixed_order = consistency_shower_fixed_order
    consistency_madanalysis_fixed_order = consistency_shower_fixed_order

    #
    # Shower
    #
    def get_allowed_shower(self):
        """ """

        if hasattr(self, 'allowed_shower'):
            return self.allowed_shower

        if 'QED' in self.proc_characteristics['splitting_types']:
            self.allowed_shower = ['OFF']
            return self.allowed_shower

        if not misc.which('bc'):
            return ['OFF']

        if self.proc_characteristics['ninitial'] == 1:
            self.allowed_shower = ['OFF']
            return ['OFF']
        else:
            if 'StdHEP' in self.available_module:
                allowed = ['HERWIG6', 'OFF', 'PYTHIA6Q', 'PYTHIA6PT']
            else:
                allowed = ['OFF']
            if 'PY8' in self.available_module:
                allowed.append('PYTHIA8')
            if 'HW7' in self.available_module:
                allowed.append('HERWIGPP')

            self.allowed_shower = allowed

            return allowed

    def check_value_shower(self, value):
        """ """

        if value.upper() in self.get_allowed_shower():
            return True
        if value.upper() in ['PYTHIA8', 'HERWIGPP']:
            return True
        if value.upper() == 'ON':
            return self.run_card['parton_shower']
        if value.upper() in ['P8', 'PY8', 'PYTHIA_8']:
            return 'PYTHIA8'
        if value.upper() in ['PY6', 'P6', 'PY6PT', 'PYTHIA_6', 'PYTHIA_6PT', 'PYTHIA6PT', 'PYTHIA6_PT']:
            return 'PYTHIA6PT'
        if value.upper() in ['PY6Q', 'PYTHIA_6Q', 'PYTHIA6Q', 'PYTHIA6_Q']:
            return 'PYTHIA6Q'
        if value.upper() in ['HW7', 'HERWIG7']:
            return 'HERWIG7'
        if value.upper() in ['HW++', 'HWPP', 'HERWIG++']:
            return 'HERWIGPP'
        if value.upper() in ['HW6', 'HERWIG_6']:
            return 'HERWIG6'

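    # Illustration (a sketch, not part of the original module): the method
    # above returns True when the value is already canonical, a canonical
    # spelling when an alias such as 'PY8' or 'HW++' is used, and implicitly
    # None when nothing matches; a caller can therefore normalise with a
    # small wrapper like this one, which is never used by the interface
    # itself.
    @staticmethod
    def _sketch_normalise_shower(switcher, value):
        result = switcher.check_value_shower(value)
        if result is True:
            return value.upper()   # already canonical, e.g. 'PYTHIA8'
        return result              # alias mapped ('PY8' -> 'PYTHIA8'), or None if invalid
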
    def set_default_shower(self):

        if 'QED' in self.proc_characteristics['splitting_types']:
            self.switch['shower'] = 'Not Avail'
        elif self.last_mode in ['LO', 'NLO', 'noshower', 'noshowerLO']:
            self.switch['shower'] = 'OFF'
        elif self.proc_characteristics['ninitial'] == 1:
            self.switch['shower'] = 'OFF'
        elif os.path.exists(pjoin(self.me_dir, 'Cards', 'shower_card.dat')):
            if self.switch['fixed_order'] == "OFF":
                self.switch['shower'] = self.run_card['parton_shower']
            elif self.switch['fixed_order'] == "ON":
                self.switch['shower'] = "OFF"
        else:
            if self.switch['fixed_order'] == "ON":
                self.switch['shower'] = 'OFF'
            else:
                self.switch['shower'] = 'OFF (%s)' % self.run_card['parton_shower']

        if self.last_mode in ['LO', 'NLO', 'noshower', 'noshowerLO']:
            self.switch['shower'] = 'OFF'
            return

        if self.proc_characteristics['ninitial'] == 1:
            self.switch['shower'] = 'OFF'
            return

        if not misc.which('bc'):
            logger.warning('bc command not available; the shower cannot be run. '
                           'Please install it if you want to shower events (e.g. sudo apt-get install bc).')
            self.switch['shower'] = 'OFF'
            return

        if os.path.exists(pjoin(self.me_dir, 'Cards', 'shower_card.dat')):
            self.switch['shower'] = self.run_card['parton_shower']
            #self.switch['shower'] = 'ON'
            self.switch['fixed_order'] = "OFF"

    def consistency_shower_madanalysis(self, vshower, vma5):
        """ MA5 is only possible with (N)LO+PS, i.e. if the shower is run"""

        if vshower == 'OFF' and vma5 == 'ON':
            return 'OFF'
        return None

    def consistency_madanalysis_shower(self, vma5, vshower):

        if vma5 == 'ON' and vshower == 'OFF':
            return 'ON'
        return None

    def get_cardcmd_for_shower(self, value):
        """ adapt the run_card according to this setup; return the list of commands to run"""

        if value != 'OFF':
            return ['set parton_shower %s' % self.switch['shower']]
        return []

    #
    # madspin
    #
    def get_allowed_madspin(self):
        """ """

        if hasattr(self, 'allowed_madspin'):
            return self.allowed_madspin

        self.allowed_madspin = []

        if 'MadSpin' not in self.available_module:
            return self.allowed_madspin
        if self.proc_characteristics['ninitial'] == 1:
            self.available_module.remove('MadSpin')
            self.allowed_madspin = ['OFF']
            return self.allowed_madspin
        else:
            if 'QED' in self.proc_characteristics['splitting_types']:
                self.allowed_madspin = ['OFF', 'onshell']
            else:
                self.allowed_madspin = ['OFF', 'ON', 'onshell']
            return self.allowed_madspin

    def check_value_madspin(self, value):
        """handle aliases and valid options not present in get_allowed_madspin;
        remember that this mode should always be OFF for 1>N ('ON' is not an allowed value)"""

        if value.upper() in self.get_allowed_madspin():
            if value == value.upper():
                return True
            else:
                return value.upper()
        elif value.lower() in self.get_allowed_madspin():
            if value == value.lower():
                return True
            else:
                return value.lower()

        if 'MadSpin' not in self.available_module or \
           'ON' not in self.get_allowed_madspin():
            return False

        if value.lower() in ['madspin', 'full']:
            return 'full'
        elif value.lower() in ['none']:
            return 'none'

    def set_default_madspin(self):

        if 'MadSpin' in self.available_module:
            if os.path.exists(pjoin(self.me_dir, 'Cards', 'madspin_card.dat')):
                self.switch['madspin'] = 'ON'
            else:
                self.switch['madspin'] = 'OFF'
        else:
            self.switch['madspin'] = 'Not Avail'

    def get_cardcmd_for_madspin(self, value):
        """set some commands to run before allowing the user to modify the cards."""

        if value == 'onshell':
            return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode onshell"]
        elif value in ['full', 'madspin']:
            return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode madspin"]
        elif value == 'none':
            return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode none"]
        else:
            return []

    #
    # reweight
    #
    def get_allowed_reweight(self):
        """set the valid (visible) options for reweight"""

        if hasattr(self, 'allowed_reweight'):
            return self.allowed_reweight

        self.allowed_reweight = []
        if 'QED' in self.proc_characteristics['splitting_types']:
            return self.allowed_reweight
        if 'reweight' not in self.available_module:
            return self.allowed_reweight
        if self.proc_characteristics['ninitial'] == 1:
            self.available_module.remove('reweight')
            self.allowed_reweight.append('OFF')
            return self.allowed_reweight
        else:
            self.allowed_reweight = ['OFF', 'ON', 'NLO', 'NLO_TREE', 'LO']
            return self.allowed_reweight

    def set_default_reweight(self):
        """initialise the switch for reweight"""

        if 'QED' in self.proc_characteristics['splitting_types']:
            self.switch['reweight'] = 'Not Avail'
        elif 'reweight' in self.available_module:
            if os.path.exists(pjoin(self.me_dir, 'Cards', 'reweight_card.dat')):
                self.switch['reweight'] = 'ON'
            else:
                self.switch['reweight'] = 'OFF'
        else:
            self.switch['reweight'] = 'Not Avail'

    def get_cardcmd_for_reweight(self, value):
        """ adapt the run_card according to this setup; return the list of commands to run"""

        if value == 'LO':
            return ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode LO"]
        elif value == 'NLO':
            return ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode NLO",
                    "set store_rwgt_info T"]
        elif value == 'NLO_TREE':
            return ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode NLO_tree",
                    "set store_rwgt_info T"]
        return []

    #
    # MadAnalysis5
    #
    def get_allowed_madanalysis(self):

        if hasattr(self, 'allowed_madanalysis'):
            return self.allowed_madanalysis

        self.allowed_madanalysis = []

        if 'QED' in self.proc_characteristics['splitting_types']:
            return self.allowed_madanalysis

        if 'MA5' not in self.available_module:
            return self.allowed_madanalysis

        if self.proc_characteristics['ninitial'] == 1:
            self.available_module.remove('MA5')
            self.allowed_madanalysis = ['OFF']
            return self.allowed_madanalysis
        else:
            self.allowed_madanalysis = ['OFF', 'ON']
            return self.allowed_madanalysis

    def set_default_madanalysis(self):
        """initialise the switch for madanalysis"""

        if 'QED' in self.proc_characteristics['splitting_types']:
            self.switch['madanalysis'] = 'Not Avail'
        elif 'MA5' not in self.available_module:
            self.switch['madanalysis'] = 'Not Avail'
        elif os.path.exists(pjoin(self.me_dir, 'Cards', 'madanalysis5_hadron_card.dat')):
            self.switch['madanalysis'] = 'ON'
        else:
            self.switch['madanalysis'] = 'OFF'

    def check_value_madanalysis(self, value):
        """check that an entry is valid; return the valid entry in case of shortcut"""

        if value.upper() in self.get_allowed('madanalysis'):
            return True
        value = value.lower()
        if value == 'hadron':
            return 'ON' if 'ON' in self.get_allowed_madanalysis() else False
        else:
            return False

#===============================================================================
# aMCatNLOCmd
#===============================================================================
class aMCatNLOCmd(CmdExtended, HelpToCmd, CompleteForCmd, common_run.CommonRunCmd):
    """The command line processor of MadGraph"""

    # Truth values
    true = ['T', '.true.', True, 'true']
    # Options and formats available
    _run_options = ['--cluster', '--multicore', '--nb_core=', '--nb_core=2', '-c', '-m']
    _generate_options = ['-f', '--laststep=parton', '--laststep=pythia', '--laststep=pgs', '--laststep=delphes']
    _calculate_decay_options = ['-f', '--accuracy=0.']
    _set_options = ['stdout_level', 'fortran_compiler', 'cpp_compiler', 'timeout']
    _plot_mode = ['all', 'parton', 'shower', 'pgs', 'delphes']
    _clean_mode = _plot_mode + ['channel', 'banner']
    _display_opts = ['run_name', 'options', 'variable']
    # survey options, dict from name to type, default value, and help text
    # Variables to store object information
    web = False
    cluster_mode = 0
    queue = 'madgraph'
    nb_core = None
    make_opts_var = {}

    next_possibility = {
        'start': ['generate_events [OPTIONS]', 'calculate_crossx [OPTIONS]', 'launch [OPTIONS]',
                  'help generate_events'],
        'generate_events': ['generate_events [OPTIONS]', 'shower'],
        'launch': ['launch [OPTIONS]', 'shower'],
        'shower': ['generate_events [OPTIONS]']
    }

    ############################################################################
    def __init__(self, me_dir=None, options={}, *completekey, **stdin):
        """ add information to the cmd """

        self.start_time = 0
        CmdExtended.__init__(self, me_dir, options, *completekey, **stdin)
        #common_run.CommonRunCmd.__init__(self, me_dir, options)

        self.mode = 'aMCatNLO'
        self.nb_core = 0
        self.prompt = "%s>" % os.path.basename(pjoin(self.me_dir))

        self.load_results_db()
        self.results.def_web_mode(self.web)
        # check that the compiler is gfortran 4.6 or later if virtuals have been exported
        proc_card = open(pjoin(self.me_dir, 'Cards', 'proc_card_mg5.dat')).read()

        if not '[real=QCD]' in proc_card:
            check_compiler(self.options, block=True)

    ############################################################################
    def do_shower(self, line):
        """ run the shower on a given parton level file """
        argss = self.split_arg(line)
        (options, argss) = _launch_parser.parse_args(argss)
        # check argument validity and normalise the arguments
        options = options.__dict__
        options['reweightonly'] = False
        self.check_shower(argss, options)
        evt_file = pjoin(os.getcwd(), argss[0], 'events.lhe')
        self.ask_run_configuration('onlyshower', options)
        self.run_mcatnlo(evt_file, options)

        self.update_status('', level='all', update_results=True)

    ################################################################################
    def do_plot(self, line):
        """Create the plots for a given run"""

        # Since, in principle, all plots are already done automatically
        args = self.split_arg(line)
        # Check argument's validity
        self.check_plot(args)
        logger.info('plot for run %s' % self.run_name)

        if not self.force:
            self.ask_edit_cards([], args, plot=True)

        if any([arg in ['parton'] for arg in args]):
            filename = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')
            if os.path.exists(filename + '.gz'):
                misc.gunzip(filename)
            if os.path.exists(filename):
                logger.info('Found events.lhe file for run %s' % self.run_name)
                shutil.move(filename, pjoin(self.me_dir, 'Events', 'unweighted_events.lhe'))
                self.create_plot('parton')
                shutil.move(pjoin(self.me_dir, 'Events', 'unweighted_events.lhe'), filename)
                misc.gzip(filename)

        if any([arg in ['all', 'parton'] for arg in args]):
            filename = pjoin(self.me_dir, 'Events', self.run_name, 'MADatNLO.top')
            if os.path.exists(filename):
                logger.info('Found MADatNLO.top file for run %s' % self.run_name)
                output = pjoin(self.me_dir, 'HTML', self.run_name, 'plots_parton.html')
                plot_dir = pjoin(self.me_dir, 'HTML', self.run_name, 'plots_parton')

                if not os.path.isdir(plot_dir):
                    os.makedirs(plot_dir)
                top_file = pjoin(plot_dir, 'plots.top')
                files.cp(filename, top_file)
                madir = self.options['madanalysis_path']
                tag = self.run_card['run_tag']
                td = self.options['td_path']
                misc.call(['%s/plot' % self.dirbin, madir, td],
                          stdout=open(pjoin(plot_dir, 'plot.log'), 'a'),
                          stderr=subprocess.STDOUT,
                          cwd=plot_dir)

                misc.call(['%s/plot_page-pl' % self.dirbin,
                           os.path.basename(plot_dir),
                           'parton'],
                          stdout=open(pjoin(plot_dir, 'plot.log'), 'a'),
                          stderr=subprocess.STDOUT,
                          cwd=pjoin(self.me_dir, 'HTML', self.run_name))
                shutil.move(pjoin(self.me_dir, 'HTML', self.run_name, 'plots.html'),
                            output)

                os.remove(pjoin(self.me_dir, 'Events', 'plots.top'))

        if any([arg in ['all', 'shower'] for arg in args]):
            filenames = misc.glob('events_*.lhe.gz', pjoin(self.me_dir, 'Events', self.run_name))
            if len(filenames) != 1:
                filenames = misc.glob('events_*.hep.gz', pjoin(self.me_dir, 'Events', self.run_name))
                if len(filenames) != 1:
                    logger.info('No shower-level event file found for run %s' % self.run_name)
                    return
                filename = filenames[0]
                misc.gunzip(filename, keep=True,
                            stdout=pjoin(self.me_dir, 'Events', 'pythia_events.hep'))

                if not os.path.exists(pjoin(self.me_dir, 'Cards', 'pythia_card.dat')):
                    if aMCatNLO and not self.options['mg5_path']:
                        raise Exception("plotting NLO HEP files requires MG5 utilities.")

                    files.cp(pjoin(self.options['mg5_path'], 'Template', 'LO', 'Cards', 'pythia_card_default.dat'),
                             pjoin(self.me_dir, 'Cards', 'pythia_card.dat'))
                self.run_hep2lhe()
            else:
                filename = filenames[0]
                misc.gunzip(filename, keep=True,
                            stdout=pjoin(self.me_dir, 'Events', 'pythia_events.hep'))

            self.create_plot('shower')
            lhe_file_name = filename.replace('.hep.gz', '.lhe')
            shutil.move(pjoin(self.me_dir, 'Events', 'pythia_events.lhe'),
                        lhe_file_name)
            misc.gzip(lhe_file_name)

        if any([arg in ['all', 'pgs'] for arg in args]):
            filename = pjoin(self.me_dir, 'Events', self.run_name,
                             '%s_pgs_events.lhco' % self.run_tag)
            if os.path.exists(filename + '.gz'):
                misc.gunzip(filename)
            if os.path.exists(filename):
                self.create_plot('PGS')
                misc.gzip(filename)
            else:
                logger.info('No valid files to make PGS plots')

        if any([arg in ['all', 'delphes'] for arg in args]):
            filename = pjoin(self.me_dir, 'Events', self.run_name,
                             '%s_delphes_events.lhco' % self.run_tag)
            if os.path.exists(filename + '.gz'):
                misc.gunzip(filename)
            if os.path.exists(filename):
                #shutil.move(filename, pjoin(self.me_dir, 'Events','delphes_events.lhco'))
                self.create_plot('Delphes')
                #shutil.move(pjoin(self.me_dir, 'Events','delphes_events.lhco'), filename)
                misc.gzip(filename)
            else:
                logger.info('No valid files to make Delphes plots')

    ############################################################################
1640 - def do_calculate_xsect(self, line):
1641 """Main commands: calculates LO/NLO cross-section, using madevent_mintFO 1642 this function wraps the do_launch one""" 1643 1644 self.start_time = time.time() 1645 argss = self.split_arg(line) 1646 # check argument validity and normalise argument 1647 (options, argss) = _calculate_xsect_parser.parse_args(argss) 1648 options = options.__dict__ 1649 options['reweightonly'] = False 1650 options['parton'] = True 1651 self.check_calculate_xsect(argss, options) 1652 self.do_launch(line, options, argss)
1653 1654 ############################################################################
1655 - def do_banner_run(self, line):
1656 """Make a run from the banner file""" 1657 1658 args = self.split_arg(line) 1659 #check the validity of the arguments 1660 self.check_banner_run(args) 1661 1662 # Remove previous cards 1663 for name in ['shower_card.dat', 'madspin_card.dat']: 1664 try: 1665 os.remove(pjoin(self.me_dir, 'Cards', name)) 1666 except Exception: 1667 pass 1668 1669 banner_mod.split_banner(args[0], self.me_dir, proc_card=False) 1670 1671 # Check if we want to modify the run 1672 if not self.force: 1673 ans = self.ask('Do you want to modify the Run type and/or any of the Cards?', 'n', ['y','n']) 1674 if ans == 'n': 1675 self.force = True 1676 1677 # Compute run mode: 1678 if self.force: 1679 mode_status = {'order': 'NLO', 'fixed_order': False, 'madspin':False, 'shower':True} 1680 banner = banner_mod.Banner(args[0]) 1681 for line in banner['run_settings']: 1682 if '=' in line: 1683 mode, value = [t.strip() for t in line.split('=')] 1684 mode_status[mode] = value 1685 else: 1686 mode_status = {} 1687 1688 # Call Generate events 1689 self.do_launch('-n %s %s' % (self.run_name, '-f' if self.force else ''), 1690 switch=mode_status)
1691 1692 ############################################################################
1693 - def do_generate_events(self, line):
1694 """Main commands: generate events 1695 this function just wraps the do_launch one""" 1696 self.do_launch(line)
1697 1698 1699 ############################################################################
1700 - def do_treatcards(self, line, amcatnlo=True,mode=''):
1701 """Advanced commands: this is for creating the correct run_card.inc from the nlo format""" 1702 #check if no 'Auto' are present in the file 1703 self.check_param_card(pjoin(self.me_dir, 'Cards','param_card.dat')) 1704 1705 # propagate the FO_card entry FO_LHE_weight_ratio to the run_card. 1706 # this variable is system only in the run_card 1707 # can not be done in EditCard since this parameter is not written in the 1708 # run_card directly. 1709 if mode in ['LO', 'NLO']: 1710 name = 'fo_lhe_weight_ratio' 1711 FO_card = analyse_card.FOAnalyseCard(pjoin(self.me_dir,'Cards', 'FO_analyse_card.dat')) 1712 if name in FO_card: 1713 self.run_card.set(name, FO_card[name], user=False) 1714 name = 'fo_lhe_postprocessing' 1715 if name in FO_card: 1716 self.run_card.set(name, FO_card[name], user=False) 1717 1718 return super(aMCatNLOCmd,self).do_treatcards(line, amcatnlo)
1719 1720 ############################################################################
1721 - def set_configuration(self, amcatnlo=True, **opt):
1722 """assign all configuration variable from file 1723 loop over the different config file if config_file not define """ 1724 return super(aMCatNLOCmd,self).set_configuration(amcatnlo=amcatnlo, **opt)
1725 1726 ############################################################################
1727 - def do_launch(self, line, options={}, argss=[], switch={}):
1728 """Main commands: launch the full chain 1729 options and args are relevant if the function is called from other 1730 functions, such as generate_events or calculate_xsect 1731 mode gives the list of switch needed for the computation (usefull for banner_run) 1732 """ 1733 1734 if not argss and not options: 1735 self.start_time = time.time() 1736 argss = self.split_arg(line) 1737 # check argument validity and normalise argument 1738 (options, argss) = _launch_parser.parse_args(argss) 1739 options = options.__dict__ 1740 self.check_launch(argss, options) 1741 1742 1743 if 'run_name' in list(options.keys()) and options['run_name']: 1744 self.run_name = options['run_name'] 1745 # if a dir with the given run_name already exists 1746 # remove it and warn the user 1747 if os.path.isdir(pjoin(self.me_dir, 'Events', self.run_name)): 1748 logger.warning('Removing old run information in \n'+ 1749 pjoin(self.me_dir, 'Events', self.run_name)) 1750 files.rm(pjoin(self.me_dir, 'Events', self.run_name)) 1751 self.results.delete_run(self.run_name) 1752 else: 1753 self.run_name = '' # will be set later 1754 1755 if options['multicore']: 1756 self.cluster_mode = 2 1757 elif options['cluster']: 1758 self.cluster_mode = 1 1759 1760 if not switch: 1761 mode = argss[0] 1762 1763 if mode in ['LO', 'NLO']: 1764 options['parton'] = True 1765 mode = self.ask_run_configuration(mode, options) 1766 else: 1767 mode = self.ask_run_configuration('auto', options, switch) 1768 1769 self.results.add_detail('run_mode', mode) 1770 1771 self.update_status('Starting run', level=None, update_results=True) 1772 1773 if self.options['automatic_html_opening']: 1774 misc.open_file(os.path.join(self.me_dir, 'crossx.html')) 1775 self.options['automatic_html_opening'] = False 1776 1777 if '+' in mode: 1778 mode = mode.split('+')[0] 1779 self.compile(mode, options) 1780 evt_file = self.run(mode, options) 1781 1782 if self.run_card['nevents'] == 0 and not mode in ['LO', 'NLO']: 1783 logger.info('No event file generated: grids have been set-up with a '\ 1784 'relative precision of %s' % self.run_card['req_acc']) 1785 return 1786 1787 if not mode in ['LO', 'NLO']: 1788 assert evt_file == pjoin(self.me_dir,'Events', self.run_name, 'events.lhe'), '%s != %s' %(evt_file, pjoin(self.me_dir,'Events', self.run_name, 'events.lhe.gz')) 1789 1790 if self.run_card['systematics_program'] == 'systematics': 1791 self.exec_cmd('systematics %s %s ' % (self.run_name, ' '.join(self.run_card['systematics_arguments']))) 1792 1793 self.exec_cmd('reweight -from_cards', postcmd=False) 1794 self.exec_cmd('decay_events -from_cards', postcmd=False) 1795 evt_file = pjoin(self.me_dir,'Events', self.run_name, 'events.lhe') 1796 1797 if not mode in ['LO', 'NLO', 'noshower', 'noshowerLO'] \ 1798 and not options['parton']: 1799 self.run_mcatnlo(evt_file, options) 1800 self.exec_cmd('madanalysis5_hadron --no_default', postcmd=False, printcmd=False) 1801 1802 elif mode == 'noshower': 1803 logger.warning("""You have chosen not to run a parton shower. NLO events without showering are NOT physical. Please, shower the Les Houches events before using them for physics analyses.""") 1804 1805 1806 self.update_status('', level='all', update_results=True) 1807 if self.run_card['ickkw'] == 3 and \ 1808 (mode in ['noshower'] or \ 1809 (('PYTHIA8' not in self.run_card['parton_shower'].upper()) and (mode in ['aMC@NLO']))): 1810 logger.warning("""You are running with FxFx merging enabled. 
To be able to merge samples of various multiplicities without double counting, you have to remove some events after showering 'by hand'. Please read http://amcatnlo.cern.ch/FxFx_merging.htm for more details.""") 1811 1812 self.store_result() 1813 #check if the param_card defines a scan. 1814 if self.param_card_iterator: 1815 cpath = pjoin(self.me_dir,'Cards','param_card.dat') 1816 param_card_iterator = self.param_card_iterator 1817 self.param_card_iterator = [] #avoid to next generate go trough here 1818 param_card_iterator.store_entry(self.run_name, self.results.current['cross'], 1819 error=self.results.current['error'], 1820 param_card_path=cpath) 1821 orig_name = self.run_name 1822 #go trough the scal 1823 with misc.TMP_variable(self, 'allow_notification_center', False): 1824 for i,card in enumerate(param_card_iterator): 1825 card.write(cpath) 1826 self.check_param_card(cpath, dependent=True) 1827 if not options['force']: 1828 options['force'] = True 1829 if options['run_name']: 1830 options['run_name'] = '%s_%s' % (orig_name, i+1) 1831 if not argss: 1832 argss = [mode, "-f"] 1833 elif argss[0] == "auto": 1834 argss[0] = mode 1835 self.do_launch("", options=options, argss=argss, switch=switch) 1836 #self.exec_cmd("launch -f ",precmd=True, postcmd=True,errorhandling=False) 1837 param_card_iterator.store_entry(self.run_name, self.results.current['cross'], 1838 error=self.results.current['error'], 1839 param_card_path=cpath) 1840 #restore original param_card 1841 param_card_iterator.write(pjoin(self.me_dir,'Cards','param_card.dat')) 1842 name = misc.get_scan_name(orig_name, self.run_name) 1843 path = pjoin(self.me_dir, 'Events','scan_%s.txt' % name) 1844 logger.info("write all cross-section results in %s" % path, '$MG:BOLD') 1845 param_card_iterator.write_summary(path) 1846 1847 if self.allow_notification_center: 1848 misc.apple_notify('Run %s finished' % os.path.basename(self.me_dir), 1849 '%s: %s +- %s ' % (self.results.current['run_name'], 1850 self.results.current['cross'], 1851 self.results.current['error']))
1852 1853 1854 ############################################################################
1855 - def do_compile(self, line):
1856 """Advanced commands: just compile the executables """ 1857 argss = self.split_arg(line) 1858 # check argument validity and normalise argument 1859 (options, argss) = _compile_parser.parse_args(argss) 1860 options = options.__dict__ 1861 options['reweightonly'] = False 1862 options['nocompile'] = False 1863 self.check_compile(argss, options) 1864 1865 mode = {'FO': 'NLO', 'MC': 'aMC@NLO'}[argss[0]] 1866 self.ask_run_configuration(mode, options) 1867 self.compile(mode, options) 1868 1869 1870 self.update_status('', level='all', update_results=True)
1871 1872
1873 - def update_random_seed(self):
1874 """Update random number seed with the value from the run_card. 1875 If this is 0, update the number according to a fresh one""" 1876 iseed = self.run_card['iseed'] 1877 if iseed == 0: 1878 randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit')) 1879 iseed = int(randinit.read()[2:]) + 1 1880 randinit.close() 1881 randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit'), 'w') 1882 randinit.write('r=%d' % iseed) 1883 randinit.close()
1884 1885
1886 - def run(self, mode, options):
1887 """runs aMC@NLO. Returns the name of the event file created""" 1888 logger.info('Starting run') 1889 1890 if not 'only_generation' in list(options.keys()): 1891 options['only_generation'] = False 1892 1893 self.get_characteristics(pjoin(self.me_dir, 'SubProcesses', 'proc_characteristics')) 1894 self.setup_cluster_or_multicore() 1895 self.update_random_seed() 1896 #find and keep track of all the jobs 1897 folder_names = {'LO': ['born_G*'], 'NLO': ['all_G*'], 1898 'aMC@LO': ['GB*'], 'aMC@NLO': ['GF*']} 1899 folder_names['noshower'] = folder_names['aMC@NLO'] 1900 folder_names['noshowerLO'] = folder_names['aMC@LO'] 1901 p_dirs = [d for d in \ 1902 open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')).read().split('\n') if d] 1903 #Clean previous results 1904 self.clean_previous_results(options,p_dirs,folder_names[mode]) 1905 1906 mcatnlo_status = ['Setting up grids', 'Computing upper envelope', 'Generating events'] 1907 1908 1909 if options['reweightonly']: 1910 event_norm=self.run_card['event_norm'] 1911 nevents=self.run_card['nevents'] 1912 return self.reweight_and_collect_events(options, mode, nevents, event_norm) 1913 1914 if mode in ['LO', 'NLO']: 1915 # this is for fixed order runs 1916 mode_dict = {'NLO': 'all', 'LO': 'born'} 1917 logger.info('Doing fixed order %s' % mode) 1918 req_acc = self.run_card['req_acc_FO'] 1919 1920 # create a list of dictionaries "jobs_to_run" with all the 1921 # jobs that need to be run 1922 integration_step=-1 1923 jobs_to_run,jobs_to_collect,integration_step = self.create_jobs_to_run(options,p_dirs, \ 1924 req_acc,mode_dict[mode],integration_step,mode,fixed_order=True) 1925 self.prepare_directories(jobs_to_run,mode) 1926 1927 # loop over the integration steps. After every step, check 1928 # if we have the required accuracy. If this is the case, 1929 # stop running, else do another step. 1930 while True: 1931 integration_step=integration_step+1 1932 self.run_all_jobs(jobs_to_run,integration_step) 1933 self.collect_log_files(jobs_to_run,integration_step) 1934 jobs_to_run,jobs_to_collect=self.collect_the_results(options,req_acc,jobs_to_run, \ 1935 jobs_to_collect,integration_step,mode,mode_dict[mode]) 1936 if not jobs_to_run: 1937 # there are no more jobs to run (jobs_to_run is empty) 1938 break 1939 # We are done. 1940 self.finalise_run_FO(folder_names[mode],jobs_to_collect) 1941 self.update_status('Run complete', level='parton', update_results=True) 1942 return 1943 1944 elif mode in ['aMC@NLO','aMC@LO','noshower','noshowerLO']: 1945 if self.ninitial == 1: 1946 raise aMCatNLOError('Decay processes can only be run at fixed order.') 1947 mode_dict = {'aMC@NLO': 'all', 'aMC@LO': 'born',\ 1948 'noshower': 'all', 'noshowerLO': 'born'} 1949 shower = self.run_card['parton_shower'].upper() 1950 nevents = self.run_card['nevents'] 1951 req_acc = self.run_card['req_acc'] 1952 if nevents == 0 and req_acc < 0 : 1953 raise aMCatNLOError('Cannot determine the required accuracy from the number '\ 1954 'of events, because 0 events requested. Please set '\ 1955 'the "req_acc" parameter in the run_card to a value '\ 1956 'between 0 and 1') 1957 elif req_acc >1 or req_acc == 0 : 1958 raise aMCatNLOError('Required accuracy ("req_acc" in the run_card) should '\ 1959 'be between larger than 0 and smaller than 1, '\ 1960 'or set to -1 for automatic determination. 
Current '\ 1961 'value is %f' % req_acc) 1962 # For more than 1M events, set req_acc to 0.001 (except when it was explicitly set in the run_card) 1963 elif req_acc < 0 and nevents > 1000000 : 1964 req_acc=0.001 1965 1966 shower_list = ['HERWIG6', 'HERWIGPP', 'PYTHIA6Q', 'PYTHIA6PT', 'PYTHIA8'] 1967 1968 if not shower in shower_list: 1969 raise aMCatNLOError('%s is not a valid parton shower. '\ 1970 'Please use one of the following: %s' \ 1971 % (shower, ', '.join(shower_list))) 1972 1973 # check that PYTHIA6PT is not used for processes with FSR 1974 if shower == 'PYTHIA6PT' and self.proc_characteristics['has_fsr']: 1975 raise aMCatNLOError('PYTHIA6PT does not support processes with FSR') 1976 1977 if mode in ['aMC@NLO', 'aMC@LO']: 1978 logger.info('Doing %s matched to parton shower' % mode[4:]) 1979 elif mode in ['noshower','noshowerLO']: 1980 logger.info('Generating events without running the shower.') 1981 elif options['only_generation']: 1982 logger.info('Generating events starting from existing results') 1983 1984 jobs_to_run,jobs_to_collect,integration_step = self.create_jobs_to_run(options,p_dirs, \ 1985 req_acc,mode_dict[mode],1,mode,fixed_order=False) 1986 # Make sure to update all the jobs to be ready for the event generation step 1987 if options['only_generation']: 1988 jobs_to_run,jobs_to_collect=self.collect_the_results(options,req_acc,jobs_to_run, \ 1989 jobs_to_collect,1,mode,mode_dict[mode],fixed_order=False) 1990 else: 1991 self.prepare_directories(jobs_to_run,mode,fixed_order=False) 1992 1993 1994 # Main loop over the three MINT generation steps: 1995 for mint_step, status in enumerate(mcatnlo_status): 1996 if options['only_generation'] and mint_step < 2: 1997 continue 1998 self.update_status(status, level='parton') 1999 self.run_all_jobs(jobs_to_run,mint_step,fixed_order=False) 2000 self.collect_log_files(jobs_to_run,mint_step) 2001 jobs_to_run,jobs_to_collect=self.collect_the_results(options,req_acc,jobs_to_run, \ 2002 jobs_to_collect,mint_step,mode,mode_dict[mode],fixed_order=False) 2003 if mint_step+1==2 and nevents==0: 2004 self.print_summary(options,2,mode) 2005 return 2006 2007 # Sanity check on the event files. If error the jobs are resubmitted 2008 self.check_event_files(jobs_to_collect) 2009 2010 if self.cluster_mode == 1: 2011 #if cluster run, wait 10 sec so that event files are transferred back 2012 self.update_status( 2013 'Waiting while files are transferred back from the cluster nodes', 2014 level='parton') 2015 time.sleep(10) 2016 2017 event_norm=self.run_card['event_norm'] 2018 # gather the various orders tag and write include files 2019 self.write_orders_tag_info() 2020 2021 return self.reweight_and_collect_events(options, mode, nevents, event_norm)
2022 2023
2024 - def write_orders_tag_info(self):
2025 """Collects the informations on the orders_tag variable from the 2026 different channels and writes a file, linked to the P0 dirs 2027 """ 2028 log = pjoin(self.me_dir, 'Events', self.run_name, 'alllogs_0.html') 2029 content = open(log).read() 2030 taglines = [l for l in content.split('\n') if 'orders_tag_plot=' in l] 2031 orderstags = [] 2032 for l in taglines: 2033 tag = int(l.split()[1]) 2034 if not tag in orderstags: 2035 orderstags.append(tag) 2036 # now write a fortran include file with all the informations 2037 content = '%d\n' % len(orderstags) 2038 content+= '%s\n' % ' '.join(['%d' % v for v in orderstags]) 2039 outfile = open(pjoin(self.me_dir, 'SubProcesses', 'orderstags_glob.dat'), 'w') 2040 outfile.write(content) 2041 outfile.close() 2042 2043 # finally link it into the p dirs 2044 p_dirs = [d for d in \ 2045 open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')).read().split('\n') if d] 2046 2047 for p_dir in p_dirs: 2048 if not os.path.isfile(pjoin(self.me_dir, 'SubProcesses', p_dir, 'orderstags_glob.dat')): 2049 files.ln(pjoin(self.me_dir, 'SubProcesses', 'orderstags_glob.dat'), 2050 pjoin(self.me_dir, 'SubProcesses', p_dir)) 2051 2052 return
2053 2054
2055 - def create_jobs_to_run(self,options,p_dirs,req_acc,run_mode,\ 2056 integration_step,mode,fixed_order=True):
2057 """Creates a list of dictionaries with all the jobs to be run""" 2058 jobs_to_run=[] 2059 if not options['only_generation']: 2060 # Fresh, new run. Check all the P*/channels.txt files 2061 # (created by the 'gensym' executable) to set-up all the 2062 # jobs using the default inputs. 2063 npoints = self.run_card['npoints_FO_grid'] 2064 niters = self.run_card['niters_FO_grid'] 2065 for p_dir in p_dirs: 2066 try: 2067 with open(pjoin(self.me_dir,'SubProcesses',p_dir,'channels.txt')) as chan_file: 2068 channels=chan_file.readline().split() 2069 except IOError: 2070 logger.warning('No integration channels found for contribution %s' % p_dir) 2071 continue 2072 if fixed_order: 2073 lch=len(channels) 2074 maxchannels=20 # combine up to 20 channels in a single job 2075 if self.run_card['pineappl']: maxchannels=1 2076 njobs=(int(lch/maxchannels)+1 if lch%maxchannels!= 0 \ 2077 else int(lch/maxchannels)) 2078 for nj in range(1,njobs+1): 2079 job={} 2080 job['p_dir']=p_dir 2081 job['channel']=str(nj) 2082 job['nchans']=(int(lch/njobs)+1 if nj <= lch%njobs else int(lch/njobs)) 2083 job['configs']=' '.join(channels[:job['nchans']]) 2084 del channels[:job['nchans']] 2085 job['split']=0 2086 if req_acc == -1: 2087 job['accuracy']=0 2088 job['niters']=niters 2089 job['npoints']=npoints 2090 elif req_acc > 0: 2091 job['accuracy']=0.05 2092 job['niters']=6 2093 job['npoints']=-1 2094 else: 2095 raise aMCatNLOError('No consistent "req_acc_FO" set. Use a value '+ 2096 'between 0 and 1 or set it equal to -1.') 2097 job['mint_mode']=0 2098 job['run_mode']=run_mode 2099 job['wgt_frac']=1.0 2100 job['wgt_mult']=1.0 2101 jobs_to_run.append(job) 2102 if channels: 2103 raise aMCatNLOError('"channels" is not empty %s' % channels) 2104 else: 2105 for channel in channels: 2106 job={} 2107 job['p_dir']=p_dir 2108 job['channel']=channel 2109 job['split']=0 2110 job['accuracy']=0.03 2111 job['niters']=12 2112 job['npoints']=-1 2113 job['mint_mode']=0 2114 job['run_mode']=run_mode 2115 job['wgt_frac']=1.0 2116 jobs_to_run.append(job) 2117 jobs_to_collect=copy.copy(jobs_to_run) # These are all jobs 2118 else: 2119 # if options['only_generation'] is true, just read the current jobs from file 2120 try: 2121 with open(pjoin(self.me_dir,"SubProcesses","job_status.pkl"),'rb') as f: 2122 jobs_to_collect=pickle.load(f) 2123 for job in jobs_to_collect: 2124 job['dirname']=pjoin(self.me_dir,'SubProcesses',job['dirname'].rsplit('/SubProcesses/',1)[1]) 2125 jobs_to_run=copy.copy(jobs_to_collect) 2126 except: 2127 raise aMCatNLOError('Cannot reconstruct jobs from saved job status in %s' % \ 2128 pjoin(self.me_dir,'SubProcesses','job_status.pkl')) 2129 # Update cross sections and determine which jobs to run next 2130 if fixed_order: 2131 jobs_to_run,jobs_to_collect=self.collect_the_results(options,req_acc,jobs_to_run, 2132 jobs_to_collect,integration_step,mode,run_mode) 2133 # Update the integration_step to make sure that nothing will be overwritten 2134 integration_step=1 2135 for job in jobs_to_run: 2136 while os.path.exists(pjoin(job['dirname'],'res_%s.dat' % integration_step)): 2137 integration_step=integration_step+1 2138 integration_step=integration_step-1 2139 else: 2140 self.append_the_results(jobs_to_collect,integration_step) 2141 return jobs_to_run,jobs_to_collect,integration_step
2142
2143 - def prepare_directories(self,jobs_to_run,mode,fixed_order=True):
2144 """Set-up the G* directories for running""" 2145 name_suffix={'born' :'B' , 'all':'F'} 2146 for job in jobs_to_run: 2147 if job['split'] == 0: 2148 if fixed_order : 2149 dirname=pjoin(self.me_dir,'SubProcesses',job['p_dir'], 2150 job['run_mode']+'_G'+job['channel']) 2151 else: 2152 dirname=pjoin(self.me_dir,'SubProcesses',job['p_dir'], 2153 'G'+name_suffix[job['run_mode']]+job['channel']) 2154 else: 2155 if fixed_order : 2156 dirname=pjoin(self.me_dir,'SubProcesses',job['p_dir'], 2157 job['run_mode']+'_G'+job['channel']+'_'+str(job['split'])) 2158 else: 2159 dirname=pjoin(self.me_dir,'SubProcesses',job['p_dir'], 2160 'G'+name_suffix[job['run_mode']]+job['channel']+'_'+str(job['split'])) 2161 job['dirname']=dirname 2162 if not os.path.isdir(dirname): 2163 os.makedirs(dirname) 2164 self.write_input_file(job,fixed_order) 2165 # link or copy the grids from the base directory to the split directory: 2166 if not fixed_order: 2167 if job['split'] != 0: 2168 for f in ['grid.MC_integer','mint_grids','res_1']: 2169 if not os.path.isfile(pjoin(job['dirname'],f)): 2170 files.ln(pjoin(job['dirname'].rsplit("_",1)[0],f),job['dirname']) 2171 else: 2172 if job['split'] != 0: 2173 for f in ['grid.MC_integer','mint_grids']: 2174 files.cp(pjoin(job['dirname'].rsplit("_",1)[0],f),job['dirname'])
2175 2176
2177 - def write_input_file(self,job,fixed_order):
2178 """write the input file for the madevent_mint* executable in the appropriate directory""" 2179 if fixed_order: 2180 content= \ 2181 """NPOINTS = %(npoints)s 2182 NITERATIONS = %(niters)s 2183 ACCURACY = %(accuracy)s 2184 ADAPT_GRID = 2 2185 MULTICHANNEL = 1 2186 SUM_HELICITY = 1 2187 NCHANS = %(nchans)s 2188 CHANNEL = %(configs)s 2189 SPLIT = %(split)s 2190 WGT_MULT= %(wgt_mult)s 2191 RUN_MODE = %(run_mode)s 2192 RESTART = %(mint_mode)s 2193 """ \ 2194 % job 2195 else: 2196 content = \ 2197 """-1 12 ! points, iterations 2198 %(accuracy)s ! desired fractional accuracy 2199 1 -0.1 ! alpha, beta for Gsoft 2200 -1 -0.1 ! alpha, beta for Gazi 2201 1 ! Suppress amplitude (0 no, 1 yes)? 2202 1 ! Exact helicity sum (0 yes, n = number/event)? 2203 %(channel)s ! Enter Configuration Number: 2204 %(mint_mode)s ! MINT imode: 0 to set-up grids, 1 to perform integral, 2 generate events 2205 1 1 1 ! if imode is 1: Folding parameters for xi_i, phi_i and y_ij 2206 %(run_mode)s ! all, born, real, virt 2207 """ \ 2208 % job 2209 with open(pjoin(job['dirname'], 'input_app.txt'), 'w') as input_file: 2210 input_file.write(content)
2211 2212
2213 - def run_all_jobs(self,jobs_to_run,integration_step,fixed_order=True):
2214 """Loops over the jobs_to_run and executes them using the function 'run_exe'""" 2215 if fixed_order: 2216 if integration_step == 0: 2217 self.update_status('Setting up grids', level=None) 2218 else: 2219 self.update_status('Refining results, step %i' % integration_step, level=None) 2220 self.ijob = 0 2221 name_suffix={'born' :'B', 'all':'F'} 2222 if fixed_order: 2223 run_type="Fixed order integration step %s" % integration_step 2224 else: 2225 run_type="MINT step %s" % integration_step 2226 self.njobs=len(jobs_to_run) 2227 for job in jobs_to_run: 2228 executable='ajob1' 2229 if fixed_order: 2230 arguments=[job['channel'],job['run_mode'], \ 2231 str(job['split']),str(integration_step)] 2232 else: 2233 arguments=[job['channel'],name_suffix[job['run_mode']], \ 2234 str(job['split']),str(integration_step)] 2235 self.run_exe(executable,arguments,run_type, 2236 cwd=pjoin(self.me_dir,'SubProcesses',job['p_dir'])) 2237 2238 if self.cluster_mode == 2: 2239 time.sleep(1) # security to allow all jobs to be launched 2240 self.wait_for_complete(run_type)
2241 2242
2243 - def collect_the_results(self,options,req_acc,jobs_to_run,jobs_to_collect,\ 2244 integration_step,mode,run_mode,fixed_order=True):
2245 """Collect the results, make HTML pages, print the summary and 2246 determine if there are more jobs to run. Returns the list 2247 of the jobs that still need to be run, as well as the 2248 complete list of jobs that need to be collected to get the 2249 final answer. 2250 """ 2251 # Get the results of the current integration/MINT step 2252 self.append_the_results(jobs_to_run,integration_step) 2253 self.cross_sect_dict = self.write_res_txt_file(jobs_to_collect,integration_step) 2254 # Update HTML pages 2255 if fixed_order: 2256 cross, error = self.make_make_all_html_results(folder_names=['%s*' % run_mode], 2257 jobs=jobs_to_collect) 2258 else: 2259 name_suffix={'born' :'B' , 'all':'F'} 2260 cross, error = self.make_make_all_html_results(folder_names=['G%s*' % name_suffix[run_mode]]) 2261 self.results.add_detail('cross', cross) 2262 self.results.add_detail('error', error) 2263 # Combine grids from split fixed order jobs 2264 if fixed_order: 2265 jobs_to_run=self.combine_split_order_run(jobs_to_run) 2266 # Set-up jobs for the next iteration/MINT step 2267 jobs_to_run_new=self.update_jobs_to_run(req_acc,integration_step,jobs_to_run,fixed_order) 2268 # IF THERE ARE NO MORE JOBS, WE ARE DONE!!! 2269 if fixed_order: 2270 # Write the jobs_to_collect directory to file so that we 2271 # can restart them later (with only-generation option) 2272 with open(pjoin(self.me_dir,"SubProcesses","job_status.pkl"),'wb') as f: 2273 pickle.dump(jobs_to_collect,f) 2274 # Print summary 2275 if (not jobs_to_run_new) and fixed_order: 2276 # print final summary of results (for fixed order) 2277 scale_pdf_info=self.collect_scale_pdf_info(options,jobs_to_collect) 2278 self.print_summary(options,integration_step,mode,scale_pdf_info,done=True) 2279 return jobs_to_run_new,jobs_to_collect 2280 elif jobs_to_run_new: 2281 # print intermediate summary of results 2282 scale_pdf_info=[] 2283 self.print_summary(options,integration_step,mode,scale_pdf_info,done=False) 2284 else: 2285 # When we are done for (N)LO+PS runs, do not print 2286 # anything yet. This will be done after the reweighting 2287 # and collection of the events 2288 scale_pdf_info=[] 2289 # Prepare for the next integration/MINT step 2290 if (not fixed_order) and integration_step+1 == 2 : 2291 # Write the jobs_to_collect directory to file so that we 2292 # can restart them later (with only-generation option) 2293 with open(pjoin(self.me_dir,"SubProcesses","job_status.pkl"),'wb') as f: 2294 pickle.dump(jobs_to_collect,f) 2295 # next step is event generation (mint_step 2) 2296 jobs_to_run_new,jobs_to_collect_new= \ 2297 self.check_the_need_to_split(jobs_to_run_new,jobs_to_collect) 2298 self.prepare_directories(jobs_to_run_new,mode,fixed_order) 2299 self.write_nevents_unweighted_file(jobs_to_collect_new,jobs_to_collect) 2300 self.write_nevts_files(jobs_to_run_new) 2301 else: 2302 if fixed_order and (not self.run_card['pineappl']) \ 2303 and self.run_card['req_acc_FO'] > 0: 2304 jobs_to_run_new,jobs_to_collect= \ 2305 self.split_jobs_fixed_order(jobs_to_run_new,jobs_to_collect) 2306 self.prepare_directories(jobs_to_run_new,mode,fixed_order) 2307 jobs_to_collect_new=jobs_to_collect 2308 if fixed_order: 2309 # Write the jobs_to_collect directory to file so that we 2310 # can collect plots (by hand) even if there was some 2311 # error. Mainly for debugging only. Normally this file 2312 # should not be used. 
(Rather, use 'job_status.pkl' which 2313 # is only written if all jobs are finished correctly) 2314 with open(pjoin(self.me_dir,"SubProcesses","job_status2.pkl"),'wb') as f: 2315 pickle.dump(jobs_to_collect_new,f) 2316 return jobs_to_run_new,jobs_to_collect_new
2317 2318
2319 - def write_nevents_unweighted_file(self,jobs,jobs0events):
2320 """writes the nevents_unweighted file in the SubProcesses directory. 2321 We also need to write the jobs that will generate 0 events, 2322 because that makes sure that the cross section from those channels 2323 is taken into account in the event weights (by collect_events.f). 2324 """ 2325 content=[] 2326 for job in jobs: 2327 path=pjoin(job['dirname'].split('/')[-2],job['dirname'].split('/')[-1]) 2328 lhefile=pjoin(path,'events.lhe') 2329 content.append(' %s %d %9e %9e' % \ 2330 (lhefile.ljust(40),job['nevents'],job['resultABS']*job['wgt_frac'],job['wgt_frac'])) 2331 for job in jobs0events: 2332 if job['nevents']==0: 2333 path=pjoin(job['dirname'].split('/')[-2],job['dirname'].split('/')[-1]) 2334 lhefile=pjoin(path,'events.lhe') 2335 content.append(' %s %d %9e %9e' % \ 2336 (lhefile.ljust(40),job['nevents'],job['resultABS'],1.)) 2337 with open(pjoin(self.me_dir,'SubProcesses',"nevents_unweighted"),'w') as f: 2338 f.write('\n'.join(content)+'\n')
2339
2340 - def write_nevts_files(self,jobs):
2341 """write the nevts files in the SubProcesses/P*/G*/ directories""" 2342 for job in jobs: 2343 with open(pjoin(job['dirname'],'nevts'),'w') as f: 2344 if self.run_card['event_norm'].lower()=='bias': 2345 f.write('%i %f\n' % (job['nevents'],self.cross_sect_dict['xseca'])) 2346 else: 2347 f.write('%i\n' % job['nevents'])
2348
2349 - def combine_split_order_run(self,jobs_to_run):
2350 """Combines jobs and grids from split jobs that have been run""" 2351 # combine the jobs that need to be combined in job 2352 # groups. Simply combine the ones that have the same p_dir and 2353 # same channel. 2354 jobgroups_to_combine=[] 2355 jobs_to_run_new=[] 2356 for job in jobs_to_run: 2357 if job['split'] == 0: 2358 job['combined']=1 2359 jobs_to_run_new.append(job) # this jobs wasn't split 2360 elif job['split'] == 1: 2361 jobgroups_to_combine.append([j for j in jobs_to_run if j['p_dir'] == job['p_dir'] and \ 2362 j['channel'] == job['channel']]) 2363 else: 2364 continue 2365 for job_group in jobgroups_to_combine: 2366 # Combine the grids (mint-grids & MC-integer grids) first 2367 self.combine_split_order_grids(job_group) 2368 jobs_to_run_new.append(self.combine_split_order_jobs(job_group)) 2369 return jobs_to_run_new
2370
2371 - def combine_split_order_jobs(self,job_group):
2372 """combine the jobs in job_group and return a single summed job""" 2373 # first copy one of the jobs in 'jobs' 2374 sum_job=copy.copy(job_group[0]) 2375 # update the information to have a 'non-split' job: 2376 sum_job['dirname']=pjoin(sum_job['dirname'].rsplit('_',1)[0]) 2377 sum_job['split']=0 2378 sum_job['wgt_mult']=1.0 2379 sum_job['combined']=len(job_group) 2380 # information to be summed: 2381 keys=['niters_done','npoints_done','niters','npoints',\ 2382 'result','resultABS','time_spend'] 2383 keys2=['error','errorABS'] 2384 # information to be summed in quadrature: 2385 for key in keys2: 2386 sum_job[key]=math.pow(sum_job[key],2) 2387 # Loop over the jobs and sum the information 2388 for i,job in enumerate(job_group): 2389 if i==0 : continue # skip the first 2390 for key in keys: 2391 sum_job[key]+=job[key] 2392 for key in keys2: 2393 sum_job[key]+=math.pow(job[key],2) 2394 for key in keys2: 2395 sum_job[key]=math.sqrt(sum_job[key]) 2396 sum_job['err_percABS'] = sum_job['errorABS']/sum_job['resultABS']*100. 2397 sum_job['err_perc'] = sum_job['error']/sum_job['result']*100. 2398 sum_job['niters']=int(sum_job['niters_done']/len(job_group)) 2399 sum_job['niters_done']=int(sum_job['niters_done']/len(job_group)) 2400 return sum_job
2401 2402
2403 - def combine_split_order_grids(self,job_group):
2404 """Combines the mint_grids and MC-integer grids from the split order 2405 jobs (fixed order only). 2406 """ 2407 files_mint_grids=[] 2408 files_MC_integer=[] 2409 location=None 2410 for job in job_group: 2411 files_mint_grids.append(pjoin(job['dirname'],'mint_grids')) 2412 files_MC_integer.append(pjoin(job['dirname'],'grid.MC_integer')) 2413 if not location: 2414 location=pjoin(job['dirname'].rsplit('_',1)[0]) 2415 else: 2416 if location != pjoin(job['dirname'].rsplit('_',1)[0]) : 2417 raise aMCatNLOError('Not all jobs have the same location. '\ 2418 +'Cannot combine them.') 2419 # Needed to average the grids (both xgrids, ave_virt and 2420 # MC_integer grids), but sum the cross section info. The 2421 # latter is only the only line that contains integers. 2422 for j,fs in enumerate([files_mint_grids,files_MC_integer]): 2423 linesoffiles=[] 2424 polyfit_data=[] 2425 for f in fs: 2426 with open(f,'r+') as fi: 2427 data=fi.readlines() 2428 linesoffiles.append([ dat for dat in data if 'POL' not in dat.split()[0] ]) 2429 polyfit_data.append([ dat for dat in data if 'POL' in dat.split()[0] ]) 2430 to_write=[] 2431 for rowgrp in zip(*linesoffiles): 2432 action=list(set([row.strip().split()[0] for row in rowgrp])) # list(set()) structure to remove duplicants 2433 floatsbyfile = [[float(a) for a in row.strip().split()[1:]] for row in rowgrp] 2434 floatgrps = list(zip(*floatsbyfile)) 2435 if len(action) != 1: 2436 raise aMCatNLOError('"mint_grids" files not in correct format. '+\ 2437 'Cannot combine them.') 2438 if 'AVE' in action: 2439 # average 2440 write_string = [sum(floatgrp)/len(floatgrp) for floatgrp in floatgrps] 2441 elif 'SUM' in action: 2442 # sum 2443 write_string = [sum(floatgrp) for floatgrp in floatgrps] 2444 elif 'MAX' in action: 2445 # take maximum 2446 write_string = [max(floatgrp) for floatgrp in floatgrps] 2447 elif 'QSM' in action: 2448 # sum in quadrature 2449 write_string = [math.sqrt(sum([err**2 for err in floatgrp])) for floatgrp in floatgrps] 2450 elif 'IDE' in action: 2451 # they should all be identical (INTEGERS) 2452 write_string = [int(round(floatgrp[0])) for floatgrp in floatgrps] 2453 elif 'SPE' in action: 2454 # special: average first; sum second; average third (ALL INTEGERS) 2455 write_string=[] 2456 for i,floatgrp in enumerate(floatgrps): 2457 if i==0: # average number of PS points per iterations 2458 write_string.append(int(sum(floatgrp)/len(floatgrp))) 2459 elif i==1: # sum te number of iterations 2460 write_string.append(int(sum(floatgrp))) 2461 elif i==2: # average the nhits_in_grids 2462 write_string.append(int(sum(floatgrp)/len(floatgrp))) 2463 else: 2464 raise aMCatNLOError('Unknown action for combining grids: %s' % action[0]) 2465 to_write.append(action[0] + " " + (" ".join(str(ws) for ws in write_string)) + "\n") 2466 2467 if polyfit_data: 2468 # special for data regarding virtuals. Need to simply append 2469 # all the data, but skipping doubles. 
2470 for i,onefile in enumerate(polyfit_data): 2471 # Get the number of channels, and the number of PS points per channel 2472 data_amount_in_file=[int(oneline.split()[1]) for oneline in onefile if len(oneline.split())==2] 2473 if i==0: 2474 filtered_list=[ [] for i in range(len(data_amount_in_file)) ] 2475 start=len(data_amount_in_file) 2476 for channel,channel_size in enumerate(data_amount_in_file): 2477 end=start+channel_size 2478 for data_channel in onefile[start:end]: 2479 if data_channel not in filtered_list[channel]: 2480 filtered_list[channel].append(data_channel) 2481 start=end 2482 # The amount of data in each file (per channel): 2483 for channel in filtered_list: 2484 to_write.append("POL " + str(len(channel)) + "\n") 2485 # All the data: 2486 for ch in filtered_list: 2487 for dat in ch: 2488 to_write.append(dat) 2489 2490 # write the data over the master location 2491 if j==0: 2492 with open(pjoin(location,'mint_grids'),'w') as f: 2493 f.writelines(to_write) 2494 elif j==1: 2495 with open(pjoin(location,'grid.MC_integer'),'w') as f: 2496 f.writelines(to_write)
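# A minimal sketch (not part of the module) of the per-row combination rules
# used in combine_split_order_grids above; 'columns' holds one tuple per
# column, with one float per split file (the zip(*floatsbyfile) layout).
import math

def combine_row(action, columns):
    if action == 'AVE':   # average
        return [sum(c) / len(c) for c in columns]
    if action == 'SUM':   # sum
        return [sum(c) for c in columns]
    if action == 'MAX':   # take maximum
        return [max(c) for c in columns]
    if action == 'QSM':   # sum in quadrature
        return [math.sqrt(sum(x ** 2 for x in c)) for c in columns]
    raise ValueError('unknown action %s' % action)  # IDE/SPE omitted here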
2497 2498
2499 - def split_jobs_fixed_order(self,jobs_to_run,jobs_to_collect):
2500 """Looks in the jobs_to_run to see if there is the need to split the 2501 jobs, depending on the expected time they take. Updates 2502 jobs_to_run and jobs_to_collect to replace the split-job by 2503 its splits. 2504 """ 2505 # determine the number jobs we should have (this is per p_dir) 2506 if self.options['run_mode'] ==2: 2507 nb_submit = int(self.options['nb_core']) 2508 elif self.options['run_mode'] ==1: 2509 nb_submit = int(self.options['cluster_size']) 2510 else: 2511 nb_submit =1 2512 # total expected aggregated running time 2513 time_expected=0 2514 for job in jobs_to_run: 2515 time_expected+=job['time_spend']*(job['niters']*job['npoints'])/ \ 2516 (job['niters_done']*job['npoints_done']) 2517 # this means that we must expect the following per job (in 2518 # ideal conditions) 2519 time_per_job=time_expected/(nb_submit*(1+len(jobs_to_run)/2)) 2520 jobs_to_run_new=[] 2521 jobs_to_collect_new=[job for job in jobs_to_collect if job['resultABS']!=0] 2522 for job in jobs_to_run: 2523 # remove current job from jobs_to_collect. Make sure 2524 # to remove all the split ones in case the original 2525 # job had been a split one (before it was re-combined) 2526 for j in [j for j in jobs_to_collect_new if j['p_dir'] == job['p_dir'] and \ 2527 j['channel'] == job['channel']]: 2528 jobs_to_collect_new.remove(j) 2529 time_expected=job['time_spend']*(job['niters']*job['npoints'])/ \ 2530 (job['niters_done']*job['npoints_done']) 2531 # if the time expected for this job is (much) larger than 2532 # the time spend in the previous iteration, and larger 2533 # than the expected time per job, split it 2534 if time_expected > max(2*job['time_spend']/job['combined'],time_per_job): 2535 # determine the number of splits needed 2536 nsplit=min(max(int(time_expected/max(2*job['time_spend']/job['combined'],time_per_job)),2),nb_submit) 2537 for i in range(1,nsplit+1): 2538 job_new=copy.copy(job) 2539 job_new['split']=i 2540 job_new['wgt_mult']=1./float(nsplit) 2541 job_new['dirname']=job['dirname']+'_%i' % job_new['split'] 2542 job_new['accuracy']=min(job['accuracy']*math.sqrt(float(nsplit)),0.1) 2543 if nsplit >= job['niters']: 2544 job_new['npoints']=int(job['npoints']*job['niters']/nsplit) 2545 job_new['niters']=1 2546 else: 2547 job_new['npoints']=int(job['npoints']/nsplit) 2548 jobs_to_collect_new.append(job_new) 2549 jobs_to_run_new.append(job_new) 2550 else: 2551 jobs_to_collect_new.append(job) 2552 jobs_to_run_new.append(job) 2553 return jobs_to_run_new,jobs_to_collect_new
2554 2555
2556 - def check_the_need_to_split(self,jobs_to_run,jobs_to_collect):
2557 """Looks in the jobs_to_run to see if there is the need to split the 2558 event generation step. Updates jobs_to_run and 2559 jobs_to_collect to replace the split-job by its 2560 splits. Also removes jobs that do not need any events. 2561 """ 2562 nevt_job=self.run_card['nevt_job'] 2563 if nevt_job > 0: 2564 jobs_to_collect_new=copy.copy(jobs_to_collect) 2565 for job in jobs_to_run: 2566 nevents=job['nevents'] 2567 if nevents == 0: 2568 jobs_to_collect_new.remove(job) 2569 elif nevents > nevt_job: 2570 jobs_to_collect_new.remove(job) 2571 if nevents % nevt_job != 0 : 2572 nsplit=int(nevents/nevt_job)+1 2573 else: 2574 nsplit=int(nevents/nevt_job) 2575 for i in range(1,nsplit+1): 2576 job_new=copy.copy(job) 2577 left_over=nevents % nsplit 2578 if i <= left_over: 2579 job_new['nevents']=int(nevents/nsplit)+1 2580 job_new['wgt_frac']=float(job_new['nevents'])/float(nevents) 2581 else: 2582 job_new['nevents']=int(nevents/nsplit) 2583 job_new['wgt_frac']=float(job_new['nevents'])/float(nevents) 2584 job_new['split']=i 2585 job_new['dirname']=job['dirname']+'_%i' % job_new['split'] 2586 jobs_to_collect_new.append(job_new) 2587 jobs_to_run_new=copy.copy(jobs_to_collect_new) 2588 else: 2589 jobs_to_run_new=copy.copy(jobs_to_collect) 2590 for job in jobs_to_collect: 2591 if job['nevents'] == 0: 2592 jobs_to_run_new.remove(job) 2593 jobs_to_collect_new=copy.copy(jobs_to_run_new) 2594 2595 return jobs_to_run_new,jobs_to_collect_new
2596 2597
2598 - def update_jobs_to_run(self,req_acc,step,jobs,fixed_order=True):
2599 """ 2600 For (N)LO+PS: determines the number of events and/or the required 2601 accuracy per job. 2602 For fixed order: determines which jobs need higher precision and 2603 returns those with the newly requested precision. 2604 """ 2605 err=self.cross_sect_dict['errt'] 2606 tot=self.cross_sect_dict['xsect'] 2607 errABS=self.cross_sect_dict['erra'] 2608 totABS=self.cross_sect_dict['xseca'] 2609 jobs_new=[] 2610 if fixed_order: 2611 if req_acc == -1: 2612 if step+1 == 1: 2613 npoints = self.run_card['npoints_FO'] 2614 niters = self.run_card['niters_FO'] 2615 for job in jobs: 2616 job['mint_mode']=-1 2617 job['niters']=niters 2618 job['npoints']=npoints 2619 jobs_new.append(job) 2620 elif step+1 == 2: 2621 pass 2622 elif step+1 > 2: 2623 raise aMCatNLOError('Cannot determine number of iterations and PS points '+ 2624 'for integration step %i' % step ) 2625 elif ( req_acc > 0 and err/abs(tot) > req_acc*1.2 ) or step <= 0: 2626 req_accABS=req_acc*abs(tot)/totABS # overal relative required accuracy on ABS Xsec. 2627 for job in jobs: 2628 # skip jobs with 0 xsec 2629 if job['resultABS'] == 0.: 2630 continue 2631 job['mint_mode']=-1 2632 # Determine relative required accuracy on the ABS for this job 2633 job['accuracy']=req_accABS*math.sqrt(totABS/job['resultABS']) 2634 # If already accurate enough, skip the job 2635 if job['accuracy'] > job['errorABS']/job['resultABS'] and step != 0: 2636 continue 2637 # Update the number of PS points based on errorABS, ncall and accuracy 2638 itmax_fl=job['niters_done']*math.pow(job['errorABS']/ 2639 (job['accuracy']*job['resultABS']),2) 2640 itmax_fl=itmax_fl*1.1 # add 10% to make sure to have enough 2641 if itmax_fl <= 4.0 : 2642 job['niters']=max(int(round(itmax_fl)),2) 2643 job['npoints']=job['npoints_done']*2 2644 elif itmax_fl > 4.0 and itmax_fl <= 16.0 : 2645 job['niters']=4 2646 job['npoints']=int(round(job['npoints_done']*itmax_fl/4.0))*2 2647 else: 2648 if itmax_fl > 100.0 : itmax_fl=50.0 2649 job['niters']=int(round(math.sqrt(itmax_fl))) 2650 job['npoints']=int(round(job['npoints_done']*itmax_fl/ 2651 round(math.sqrt(itmax_fl))))*2 2652 # Add the job to the list of jobs that need to be run 2653 jobs_new.append(job) 2654 return jobs_new 2655 elif step+1 <= 2: 2656 nevents=self.run_card['nevents'] 2657 # Total required accuracy for the upper bounding envelope 2658 if req_acc<0: 2659 req_acc2_inv=nevents 2660 else: 2661 req_acc2_inv=1/(req_acc*req_acc) 2662 if step+1 == 1 or step+1 == 2 : 2663 # determine the req. accuracy for each of the jobs for Mint-step = 1 2664 for job in jobs: 2665 accuracy=min(math.sqrt(totABS/(req_acc2_inv*job['resultABS'])),0.2) 2666 job['accuracy']=accuracy 2667 if step+1 == 2: 2668 # Randomly (based on the relative ABS Xsec of the job) determine the 2669 # number of events each job needs to generate for MINT-step = 2. 2670 r=self.get_randinit_seed() 2671 random.seed(r) 2672 totevts=nevents 2673 for job in jobs: 2674 job['nevents'] = 0 2675 while totevts : 2676 target = random.random() * totABS 2677 crosssum = 0. 2678 i = 0 2679 while i<len(jobs) and crosssum < target: 2680 job = jobs[i] 2681 crosssum += job['resultABS'] 2682 i += 1 2683 totevts -= 1 2684 i -= 1 2685 jobs[i]['nevents'] += 1 2686 for job in jobs: 2687 job['mint_mode']=step+1 # next step 2688 return jobs 2689 else: 2690 return []
2691 2692
2693 - def get_randinit_seed(self):
2694 """ Get the random number seed from the randinit file """ 2695 with open(pjoin(self.me_dir,"SubProcesses","randinit")) as randinit: 2696 # format of the file is "r=%d". 2697 iseed = int(randinit.read()[2:]) 2698 return iseed
2699 2700
2701 - def append_the_results(self,jobs,integration_step):
2702 """Appends the results for each of the jobs in the job list""" 2703 error_found=False 2704 for job in jobs: 2705 try: 2706 if integration_step >= 0 : 2707 with open(pjoin(job['dirname'],'res_%s.dat' % integration_step)) as res_file: 2708 results=res_file.readline().split() 2709 else: 2710 # should only be here when doing fixed order with the 'only_generation' 2711 # option equal to True. Take the results from the final run done. 2712 with open(pjoin(job['dirname'],'res.dat')) as res_file: 2713 results=res_file.readline().split() 2714 except IOError: 2715 if not error_found: 2716 error_found=True 2717 error_log=[] 2718 error_log.append(pjoin(job['dirname'],'log.txt')) 2719 continue 2720 job['resultABS']=float(results[0]) 2721 job['errorABS']=float(results[1]) 2722 job['result']=float(results[2]) 2723 job['error']=float(results[3]) 2724 job['niters_done']=int(results[4]) 2725 job['npoints_done']=int(results[5]) 2726 job['time_spend']=float(results[6]) 2727 if job['resultABS'] != 0: 2728 job['err_percABS'] = job['errorABS']/job['resultABS']*100. 2729 job['err_perc'] = job['error']/job['result']*100. 2730 else: 2731 job['err_percABS'] = 0. 2732 job['err_perc'] = 0. 2733 if error_found: 2734 raise aMCatNLOError('An error occurred during the collection of results.\n' + 2735 'Please check the .log files inside the directories which failed:\n' + 2736 '\n'.join(error_log)+'\n')
2737 2738 2739
2740 - def write_res_txt_file(self,jobs,integration_step):
2741 """writes the res.txt files in the SubProcess dir""" 2742 jobs.sort(key = lambda job: -job['errorABS']) 2743 content=[] 2744 content.append('\n\nCross section per integration channel:') 2745 for job in jobs: 2746 content.append('%(p_dir)20s %(channel)15s %(result)10.8e %(error)6.4e %(err_perc)6.4f%% ' % job) 2747 content.append('\n\nABS cross section per integration channel:') 2748 for job in jobs: 2749 content.append('%(p_dir)20s %(channel)15s %(resultABS)10.8e %(errorABS)6.4e %(err_percABS)6.4f%% ' % job) 2750 # print also statistics for each directory 2751 dir_dict={} 2752 for job in jobs: 2753 try: 2754 dir_dict[job['p_dir']]['result'] += job['result']*job['wgt_frac'] 2755 dir_dict[job['p_dir']]['resultABS'] += job['resultABS']*job['wgt_frac'] 2756 # store the error ^2 2757 dir_dict[job['p_dir']]['error'] += math.pow(job['error'], 2)*job['wgt_frac'] 2758 dir_dict[job['p_dir']]['errorABS'] += math.pow(job['errorABS'], 2)*job['wgt_frac'] 2759 except KeyError: 2760 dir_dict[job['p_dir']] = { 2761 'result' : job['result']*job['wgt_frac'], 2762 'resultABS' : job['resultABS']*job['wgt_frac'], 2763 'error' : math.pow(job['error'], 2)*job['wgt_frac'], 2764 'errorABS' : math.pow(job['errorABS'], 2)*job['wgt_frac']} 2765 2766 for dir_res in dir_dict.values(): 2767 dir_res['error'] = math.sqrt(dir_res['error']) 2768 dir_res['errorABS'] = math.sqrt(dir_res['errorABS']) 2769 content.append('\n\nABS cross section per dir') 2770 for ddir, res in dir_dict.items(): 2771 content.append(('%20s' % ddir) + ' %(resultABS)10.8e %(errorABS)6.4e ' % res) 2772 content.append('\n\nCross section per dir') 2773 for ddir, res in dir_dict.items(): 2774 content.append(('%20s' % ddir) + ' %(result)10.8e %(error)6.4e ' % res) 2775 2776 totABS=0 2777 errABS=0 2778 tot=0 2779 err=0 2780 for job in jobs: 2781 totABS+= job['resultABS']*job['wgt_frac'] 2782 errABS+= math.pow(job['errorABS'],2)*job['wgt_frac'] 2783 tot+= job['result']*job['wgt_frac'] 2784 err+= math.pow(job['error'],2)*job['wgt_frac'] 2785 if jobs: 2786 content.append('\nTotal ABS and \nTotal: \n %10.8e +- %6.4e (%6.4e%%)\n %10.8e +- %6.4e (%6.4e%%) \n' %\ 2787 (totABS, math.sqrt(errABS), math.sqrt(errABS)/totABS *100.,\ 2788 tot, math.sqrt(err), math.sqrt(err)/tot *100.)) 2789 with open(pjoin(self.me_dir,'SubProcesses','res_%s.txt' % integration_step),'w') as res_file: 2790 res_file.write('\n'.join(content)) 2791 randinit=self.get_randinit_seed() 2792 return {'xsect':tot,'xseca':totABS,'errt':math.sqrt(err),\ 2793 'erra':math.sqrt(errABS),'randinit':randinit}
2794 2795
2796 - def collect_scale_pdf_info(self,options,jobs):
2797 """read the scale_pdf_dependence.dat files and collects there results""" 2798 scale_pdf_info=[] 2799 if any(self.run_card['reweight_scale']) or any(self.run_card['reweight_PDF']) or \ 2800 len(self.run_card['dynamical_scale_choice']) > 1 or len(self.run_card['lhaid']) > 1: 2801 evt_files=[] 2802 evt_wghts=[] 2803 for job in jobs: 2804 evt_files.append(pjoin(job['dirname'],'scale_pdf_dependence.dat')) 2805 evt_wghts.append(job['wgt_frac']) 2806 scale_pdf_info = self.pdf_scale_from_reweighting(evt_files,evt_wghts) 2807 return scale_pdf_info
2808 2809
2810 - def combine_plots_FO(self,folder_name,jobs):
2811 """combines the plots and puts then in the Events/run* directory""" 2812 devnull = open(os.devnull, 'w') 2813 2814 if self.analyse_card['fo_analysis_format'].lower() == 'topdrawer': 2815 topfiles = [] 2816 for job in jobs: 2817 if job['dirname'].endswith('.top'): 2818 topfiles.append(job['dirname']) 2819 else: 2820 topfiles.append(pjoin(job['dirname'],'MADatNLO.top')) 2821 misc.call(['./combine_plots_FO.sh'] + topfiles, \ 2822 stdout=devnull, 2823 cwd=pjoin(self.me_dir, 'SubProcesses')) 2824 files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.top'), 2825 pjoin(self.me_dir, 'Events', self.run_name)) 2826 logger.info('The results of this run and the TopDrawer file with the plots' + \ 2827 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2828 elif self.analyse_card['fo_analysis_format'].lower() == 'hwu': 2829 out=pjoin(self.me_dir,'Events',self.run_name,'MADatNLO') 2830 self.combine_plots_HwU(jobs,out) 2831 try: 2832 misc.call(['gnuplot','MADatNLO.gnuplot'],\ 2833 stdout=devnull,stderr=devnull,\ 2834 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 2835 except Exception: 2836 pass 2837 logger.info('The results of this run and the HwU and GnuPlot files with the plots' + \ 2838 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2839 elif self.analyse_card['fo_analysis_format'].lower() == 'root': 2840 rootfiles = [] 2841 for job in jobs: 2842 if job['dirname'].endswith('.root'): 2843 rootfiles.append(job['dirname']) 2844 else: 2845 rootfiles.append(pjoin(job['dirname'],'MADatNLO.root')) 2846 misc.call(['./combine_root.sh'] + folder_name + rootfiles, \ 2847 stdout=devnull, 2848 cwd=pjoin(self.me_dir, 'SubProcesses')) 2849 files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.root'), 2850 pjoin(self.me_dir, 'Events', self.run_name)) 2851 logger.info('The results of this run and the ROOT file with the plots' + \ 2852 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2853 elif self.analyse_card['fo_analysis_format'].lower() == 'lhe': 2854 self.combine_FO_lhe(jobs) 2855 logger.info('The results of this run and the LHE File (to be used for plotting only)' + \ 2856 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2857 else: 2858 logger.info('The results of this run' + \ 2859 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name))
2860
2861 - def combine_FO_lhe(self,jobs):
2862 """combine the various lhe file generated in each directory. 2863 They are two steps: 2864 1) banner 2865 2) reweight each sample by the factor written at the end of each file 2866 3) concatenate each of the new files (gzip those). 2867 """ 2868 2869 logger.info('Combining lhe events for plotting analysis') 2870 start = time.time() 2871 self.run_card['fo_lhe_postprocessing'] = [i.lower() for i in self.run_card['fo_lhe_postprocessing']] 2872 output = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz') 2873 if os.path.exists(output): 2874 os.remove(output) 2875 2876 2877 2878 2879 # 1. write the banner 2880 text = open(pjoin(jobs[0]['dirname'],'header.txt'),'r').read() 2881 i1, i2 = text.find('<initrwgt>'),text.find('</initrwgt>') 2882 self.banner['initrwgt'] = text[10+i1:i2] 2883 # 2884 # <init> 2885 # 2212 2212 6.500000e+03 6.500000e+03 0 0 247000 247000 -4 1 2886 # 8.430000e+02 2.132160e+00 8.430000e+02 1 2887 # <generator name='MadGraph5_aMC@NLO' version='2.5.2'>please cite 1405.0301 </generator> 2888 # </init> 2889 2890 cross = sum(j['result'] for j in jobs) 2891 error = math.sqrt(sum(j['error'] for j in jobs)) 2892 self.banner['init'] = "0 0 0e0 0e0 0 0 0 0 -4 1\n %s %s %s 1" % (cross, error, cross) 2893 self.banner.write(output[:-3], close_tag=False) 2894 misc.gzip(output[:-3]) 2895 2896 2897 2898 fsock = lhe_parser.EventFile(output,'a') 2899 if 'nogrouping' in self.run_card['fo_lhe_postprocessing']: 2900 fsock.eventgroup = False 2901 else: 2902 fsock.eventgroup = True 2903 2904 if 'norandom' in self.run_card['fo_lhe_postprocessing']: 2905 for job in jobs: 2906 dirname = job['dirname'] 2907 #read last line 2908 lastline = misc.BackRead(pjoin(dirname,'events.lhe')).readline() 2909 nb_event, sumwgt, cross = [float(i) for i in lastline.split()] 2910 # get normalisation ratio 2911 ratio = cross/sumwgt 2912 lhe = lhe_parser.EventFile(pjoin(dirname,'events.lhe')) 2913 lhe.eventgroup = True # read the events by eventgroup 2914 for eventsgroup in lhe: 2915 neweventsgroup = [] 2916 for i,event in enumerate(eventsgroup): 2917 event.rescale_weights(ratio) 2918 if i>0 and 'noidentification' not in self.run_card['fo_lhe_postprocessing'] \ 2919 and event == neweventsgroup[-1]: 2920 neweventsgroup[-1].wgt += event.wgt 2921 for key in event.reweight_data: 2922 neweventsgroup[-1].reweight_data[key] += event.reweight_data[key] 2923 else: 2924 neweventsgroup.append(event) 2925 fsock.write_events(neweventsgroup) 2926 lhe.close() 2927 os.remove(pjoin(dirname,'events.lhe')) 2928 else: 2929 lhe = [] 2930 lenlhe = [] 2931 misc.sprint('Need to combine %s event files' % len(jobs)) 2932 globallhe = lhe_parser.MultiEventFile() 2933 globallhe.eventgroup = True 2934 for job in jobs: 2935 dirname = job['dirname'] 2936 lastline = misc.BackRead(pjoin(dirname,'events.lhe')).readline() 2937 nb_event, sumwgt, cross = [float(i) for i in lastline.split()] 2938 lastlhe = globallhe.add(pjoin(dirname,'events.lhe'),cross, 0, cross, 2939 nb_event=int(nb_event), scale=cross/sumwgt) 2940 for eventsgroup in globallhe: 2941 neweventsgroup = [] 2942 for i,event in enumerate(eventsgroup): 2943 event.rescale_weights(event.sample_scale) 2944 if i>0 and 'noidentification' not in self.run_card['fo_lhe_postprocessing'] \ 2945 and event == neweventsgroup[-1]: 2946 neweventsgroup[-1].wgt += event.wgt 2947 for key in event.reweight_data: 2948 neweventsgroup[-1].reweight_data[key] += event.reweight_data[key] 2949 else: 2950 neweventsgroup.append(event) 2951 fsock.write_events(neweventsgroup) 2952 globallhe.close() 2953 
fsock.write('</LesHouchesEvents>\n') 2954 fsock.close() 2955 misc.sprint('The combining of the LHE files has taken ', time.time()-start) 2956 for job in jobs: 2957 dirname = job['dirname'] 2958 os.remove(pjoin(dirname,'events.lhe')) 2959 2960 2961 2962 misc.sprint('The combining of the LHE files has taken ', time.time()-start)
2963 2964 2965 2966 2967 2968
2969 - def combine_plots_HwU(self,jobs,out,normalisation=None):
2970 """Sums all the plots in the HwU format.""" 2971 logger.debug('Combining HwU plots.') 2972 2973 command = [sys.executable] 2974 command.append(pjoin(self.me_dir, 'bin', 'internal','histograms.py')) 2975 for job in jobs: 2976 if job['dirname'].endswith('.HwU'): 2977 command.append(job['dirname']) 2978 else: 2979 command.append(pjoin(job['dirname'],'MADatNLO.HwU')) 2980 command.append("--out="+out) 2981 command.append("--gnuplot") 2982 command.append("--band=[]") 2983 command.append("--lhapdf-config="+self.options['lhapdf']) 2984 if normalisation: 2985 command.append("--multiply="+(','.join([str(n) for n in normalisation]))) 2986 command.append("--sum") 2987 command.append("--keep_all_weights") 2988 command.append("--no_open") 2989 2990 p = misc.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, cwd=self.me_dir) 2991 2992 while p.poll() is None: 2993 line = p.stdout.readline().decode() 2994 #misc.sprint(type(line)) 2995 if any(t in line for t in ['INFO:','WARNING:','CRITICAL:','ERROR:','KEEP:']): 2996 print(line[:-1]) 2997 elif __debug__ and line: 2998 logger.debug(line[:-1])
2999 3000
3001 - def pineappl_combine(self,cross,error,jobs):
3002 """Combines the PineAPPL grids in all the SubProcess/P*/all_G*/ directories""" 3003 logger.debug('Combining PineAPPL grids \n') 3004 all_jobs=[] 3005 for job in jobs: 3006 if job['resultABS'] == 0.0: 3007 logger.warning('pineappl_combine: Job\n%s\nwill be skipped, as it returned zero cross-section' % 3008 job['dirname']) 3009 continue 3010 all_jobs.append(job['dirname']) 3011 ngrids=len(all_jobs) 3012 nobs =len([name for name in os.listdir(all_jobs[0]) if name.endswith("_out.pineappl")]) 3013 for obs in range(0,nobs): 3014 gdir = [pjoin(job,"grid_obs_"+str(obs)+"_out.pineappl") for job in all_jobs] 3015 # combine PineAPPL grid from different channels for observable 'obs' 3016 if self.run_card["pineappl"]: 3017 unc2_inv=pow(cross/error,2) 3018 unc2_inv_ngrids=pow(cross/error,2)*ngrids 3019 misc.call(['pineappl','merge', pjoin(self.me_dir,"Events", 3020 self.run_name,"amcblast_obs_"+str(obs)+".pineappl")]+ gdir) 3021 3022 # after combining, delete the original grids 3023 for ggdir in gdir: 3024 os.remove(ggdir)
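    # Illustrative sketch: the merge above shells out to the pineappl CLI,
    # one observable at a time. A minimal standalone equivalent, assuming the
    # 'pineappl' executable is installed and with stand-in grid paths:
    #
    #   import subprocess
    #   grids = ['P0/all_G1/grid_obs_0_out.pineappl',
    #            'P0/all_G2/grid_obs_0_out.pineappl']
    #   subprocess.check_call(['pineappl', 'merge',
    #                          'Events/run_01/amcblast_obs_0.pineappl'] + grids)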
    def collect_log_files(self, jobs, integration_step):
        """collect the log files and put them in a single, html-friendly file
        inside the Events/run_.../ directory"""
        log_file = pjoin(self.me_dir, 'Events', self.run_name,
                         'alllogs_%d.html' % integration_step)
        outfile = open(log_file, 'w')

        content = ''
        content += '<HTML><BODY>\n<font face="courier" size=2>'
        for job in jobs:
            # put an anchor
            log = pjoin(job['dirname'], 'log_MINT%s.txt' % integration_step)
            content += '<a name=%s></a>\n' % (os.path.dirname(log).replace(
                                    pjoin(self.me_dir, 'SubProcesses'), ''))
            # and put some nice header
            content += '<font color="red">\n'
            content += '<br>LOG file for integration channel %s, %s <br>' % \
                       (os.path.dirname(log).replace(pjoin(self.me_dir,
                                                           'SubProcesses'), ''),
                        integration_step)
            content += '</font>\n'
            # then just flush the content of the small log inside the big log;
            # the PRE tag prints everything verbatim
            with open(log) as l:
                content += '<PRE>\n' + l.read() + '\n</PRE>'
            content += '<br>\n'
            outfile.write(content)
            content = ''

        outfile.write('</font>\n</BODY></HTML>\n')
        outfile.close()
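    # Illustrative sketch (hypothetical file names): each per-channel log is
    # wrapped in a <PRE> tag so it renders verbatim in the combined HTML file:
    #
    #   logs = ['P0_gg_ttx/G1/log_MINT0.txt']
    #   html = '<HTML><BODY>\n'
    #   for log in logs:
    #       with open(log) as l:
    #           html += '<PRE>\n' + l.read() + '\n</PRE>'
    #   html += '</BODY></HTML>\n'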
    def finalise_run_FO(self, folder_name, jobs):
        """Combine the plots and put the res*.txt files in the Events/run.../ folder."""
        # Copy the res_*.txt files to the Events/run* folder
        res_files = misc.glob('res_*.txt', pjoin(self.me_dir, 'SubProcesses'))
        for res_file in res_files:
            files.mv(res_file, pjoin(self.me_dir, 'Events', self.run_name))
        # Collect the plots and put them in the Events/run* folder
        self.combine_plots_FO(folder_name, jobs)
        # If PineAPPL is linked, combine the grids to be put inside Events/run_XX
        if self.run_card['pineappl']:
            cross = self.cross_sect_dict['xsect']
            error = self.cross_sect_dict['errt']
            self.pineappl_combine(cross, error, jobs)
    def setup_cluster_or_multicore(self):
        """setup the number of cores for multicore, and the cluster-type for cluster runs"""
        if self.cluster_mode == 1:
            cluster_name = self.options['cluster_type']
            try:
                self.cluster = cluster.from_name[cluster_name](**self.options)
            except KeyError:
                # Check if a plugin defines this type of cluster
                # check for the PLUGIN format
                cluster_class = misc.from_plugin_import(self.plugin_path,
                                    'new_cluster', cluster_name,
                                    info='cluster handling will be done with PLUGIN: %{plug}s')
                if cluster_class:
                    self.cluster = cluster_class(**self.options)

        if self.cluster_mode == 2:
            try:
                import multiprocessing
                if not self.nb_core:
                    try:
                        self.nb_core = int(self.options['nb_core'])
                    except TypeError:
                        self.nb_core = multiprocessing.cpu_count()
                logger.info('Using %d cores' % self.nb_core)
            except ImportError:
                self.nb_core = 1
                logger.warning('Impossible to detect the number of cores => Using one.\n' +
                               'Use set nb_core X in order to set this number and be able to ' +
                               'run in multicore.')

            self.cluster = cluster.MultiCore(**self.options)
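    # Illustrative sketch of the core-counting fallback used above: prefer
    # the user option, fall back to the detected CPU count, and use a single
    # core if multiprocessing is unavailable ('options' is a stand-in dict):
    #
    #   options = {'nb_core': None}
    #   try:
    #       import multiprocessing
    #       nb_core = int(options['nb_core'] or multiprocessing.cpu_count())
    #   except ImportError:
    #       nb_core = 1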
    def clean_previous_results(self, options, p_dirs, folder_name):
        """Clean previous results.
        o. If doing only the reweighting step, do not delete anything and return directly.
        o. Always remove all the G*_* files (from split event generation).
        o. Remove the G* (or born_G* or all_G*) only when NOT doing only_generation or reweight_only."""
        if options['reweightonly']:
            return
        if not options['only_generation']:
            self.update_status('Cleaning previous results', level=None)
        for dir in p_dirs:
            # find the old folders to be removed
            for obj in folder_name:
                # list all the G* (or all_G* or born_G*) directories
                to_rm = [file for file in
                         os.listdir(pjoin(self.me_dir, 'SubProcesses', dir))
                         if file.startswith(obj[:-1]) and
                         (os.path.isdir(pjoin(self.me_dir, 'SubProcesses', dir, file)) or
                          os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file)))]
                # list all the G*_* directories (from split event generation)
                to_always_rm = [file for file in
                                os.listdir(pjoin(self.me_dir, 'SubProcesses', dir))
                                if file.startswith(obj[:-1]) and
                                '_' in file and not '_G' in file and
                                (os.path.isdir(pjoin(self.me_dir, 'SubProcesses', dir, file)) or
                                 os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file)))]

                if not options['only_generation']:
                    to_always_rm.extend(to_rm)
                    if os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, 'MadLoop5_resources.tar.gz')):
                        to_always_rm.append(pjoin(self.me_dir, 'SubProcesses', dir, 'MadLoop5_resources.tar.gz'))
                files.rm([pjoin(self.me_dir, 'SubProcesses', dir, d) for d in to_always_rm])
        return
    def print_summary(self, options, step, mode, scale_pdf_info=[], done=True):
        """print a summary of the results contained in self.cross_sect_dict.
        step corresponds to the mintMC step; if =2 (i.e. after event generation)
        some additional information is printed"""
        # find the process name
        proc_card_lines = open(pjoin(self.me_dir, 'Cards', 'proc_card_mg5.dat')).read().split('\n')
        process = ''
        for line in proc_card_lines:
            if line.startswith('generate') or line.startswith('add process'):
                process = process + (line.replace('generate ', '')).replace('add process ', '') + ' ; '
        lpp = {0: 'l', 1: 'p', -1: 'pbar', 2: 'elastic photon from p', 3: 'elastic photon from e'}
        if self.ninitial == 1:
            proc_info = '\n Process %s' % process[:-3]
        else:
            proc_info = '\n Process %s\n Run at %s-%s collider (%s + %s GeV)' % \
                        (process[:-3], lpp[self.run_card['lpp1']], lpp[self.run_card['lpp2']],
                         self.run_card['ebeam1'], self.run_card['ebeam2'])

        if self.ninitial == 1:
            self.cross_sect_dict['unit'] = 'GeV'
            self.cross_sect_dict['xsec_string'] = '(Partial) decay width'
            self.cross_sect_dict['axsec_string'] = '(Partial) abs(decay width)'
        else:
            self.cross_sect_dict['unit'] = 'pb'
            self.cross_sect_dict['xsec_string'] = 'Total cross section'
            self.cross_sect_dict['axsec_string'] = 'Total abs(cross section)'
        if self.run_card['event_norm'].lower() == 'bias':
            self.cross_sect_dict['xsec_string'] += ', incl. bias (DO NOT USE)'

        if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
            status = ['Determining the number of unweighted events per channel',
                      'Updating the number of unweighted events per channel',
                      'Summary:']
            computed = '(computed from LHE events)'
        elif mode in ['NLO', 'LO']:
            status = ['Results after grid setup:', 'Current results:',
                      'Final results and run summary:']
            computed = '(computed from histogram information)'

        if step != 2 and mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
            message = status[step] + '\n\n Intermediate results:' + \
                      ('\n Random seed: %(randinit)d' +
                       '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' +
                       '\n %(axsec_string)s: %(xseca)8.3e +- %(erra)6.1e %(unit)s \n') \
                      % self.cross_sect_dict
        elif mode in ['NLO', 'LO'] and not done:
            if step == 0:
                message = '\n ' + status[0] + \
                          '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
                          self.cross_sect_dict
            else:
                message = '\n ' + status[1] + \
                          '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
                          self.cross_sect_dict
        else:
            message = '\n --------------------------------------------------------------'
            message = message + '\n ' + status[2] + proc_info
            if mode not in ['LO', 'NLO']:
                message = message + \
                          '\n Number of events generated: %s' % self.run_card['nevents']
            message = message + \
                      '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
                      self.cross_sect_dict
            message = message + \
                      '\n --------------------------------------------------------------'
            if scale_pdf_info and (self.run_card['nevents'] >= 10000 or mode in ['NLO', 'LO']):
                if scale_pdf_info[0]:
                    # scale uncertainties
                    message = message + '\n Scale variation %s:' % computed
                    for s in scale_pdf_info[0]:
                        if s['unc']:
                            if self.run_card['ickkw'] != -1:
                                message = message + \
                                    ('\n Dynamical_scale_choice %(label)i (envelope of %(size)s values): '
                                     '\n %(cen)8.3e pb +%(max)0.1f%% -%(min)0.1f%%') % s
                            else:
                                message = message + \
                                    ('\n Soft and hard scale dependence (added in quadrature): '
                                     '\n %(cen)8.3e pb +%(max_q)0.1f%% -%(min_q)0.1f%%') % s
                        else:
                            message = message + \
                                ('\n Dynamical_scale_choice %(label)i: '
                                 '\n %(cen)8.3e pb') % s

                if scale_pdf_info[1]:
                    # pdf uncertainties
                    message = message + '\n PDF variation %s:' % computed
                    for p in scale_pdf_info[1]:
                        if p['unc'] == 'none':
                            message = message + \
                                ('\n %(name)s (central value only): '
                                 '\n %(cen)8.3e pb') % p
                        elif p['unc'] == 'unknown':
                            message = message + \
                                ('\n %(name)s (%(size)s members; combination method unknown): '
                                 '\n %(cen)8.3e pb') % p
                        else:
                            message = message + \
                                ('\n %(name)s (%(size)s members; using %(unc)s method): '
                                 '\n %(cen)8.3e pb +%(max)0.1f%% -%(min)0.1f%%') % p
                message = message + \
                          '\n --------------------------------------------------------------'

        if (mode in ['NLO', 'LO'] and not done) or \
           (mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO'] and step != 2):
            logger.info(message + '\n')
            return

        # Some advanced general statistics are shown in the debug message at the
        # end of the run; make sure this never stops a run.
        # Gather some basic statistics for the run, extracted from the log files.
        if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
            log_GV_files = misc.glob(pjoin('P*', 'G*', 'log_MINT*.txt'),
                                     pjoin(self.me_dir, 'SubProcesses'))
            all_log_files = log_GV_files
        elif mode == 'NLO':
            log_GV_files = misc.glob(pjoin('P*', 'all_G*', 'log_MINT*.txt'),
                                     pjoin(self.me_dir, 'SubProcesses'))
            all_log_files = log_GV_files
        elif mode == 'LO':
            log_GV_files = ''
            all_log_files = misc.glob(pjoin('P*', 'born_G*', 'log_MINT*.txt'),
                                      pjoin(self.me_dir, 'SubProcesses'))
        else:
            raise aMCatNLOError('Run mode %s not supported.' % mode)

        try:
            message, debug_msg = \
                self.compile_advanced_stats(log_GV_files, all_log_files, message)
        except Exception as e:
            debug_msg = 'Advanced statistics collection failed with error "%s"\n' % str(e)
            err_string = StringIO.StringIO()
            traceback.print_exc(limit=4, file=err_string)
            debug_msg += 'Please report this backtrace to a MG5_aMC developer:\n%s' \
                         % err_string.getvalue()

        logger.debug(debug_msg + '\n')
        logger.info(message + '\n')

        # Now copy the relevant information to the Events/Run_<xxx> directory
        evt_path = pjoin(self.me_dir, 'Events', self.run_name)
        open(pjoin(evt_path, 'summary.txt'), 'w').write(message + '\n')
        open(pjoin(evt_path, '.full_summary.txt'),
             'w').write(message + '\n\n' + debug_msg + '\n')

        self.archive_files(evt_path, mode)
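    # Illustrative sketch: the summary lines above are rendered via
    # %-formatting with a dictionary; with a hypothetical cross_sect_dict:
    #
    #   d = {'xsec_string': 'Total cross section', 'xsect': 1.234e2,
    #        'errt': 5.6e-1, 'unit': 'pb'}
    #   print('%(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % d)
    #   # -> Total cross section: 1.234e+02 +- 5.6e-01 pb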
    def archive_files(self, evt_path, mode):
        """Copies into the Events/Run_<xxx> directory the relevant files
        characterizing the run."""

        files_to_arxiv = [pjoin('Cards', 'param_card.dat'),
                          pjoin('Cards', 'MadLoopParams.dat'),
                          pjoin('Cards', 'FKS_params.dat'),
                          pjoin('Cards', 'run_card.dat'),
                          pjoin('Subprocesses', 'setscales.f'),
                          pjoin('Subprocesses', 'cuts.f')]

        if mode in ['NLO', 'LO']:
            files_to_arxiv.append(pjoin('Cards', 'FO_analyse_card.dat'))

        if not os.path.exists(pjoin(evt_path, 'RunMaterial')):
            os.mkdir(pjoin(evt_path, 'RunMaterial'))

        for path in files_to_arxiv:
            if os.path.isfile(pjoin(self.me_dir, path)):
                files.cp(pjoin(self.me_dir, path), pjoin(evt_path, 'RunMaterial'))
        misc.call(['tar', '-czpf', 'RunMaterial.tar.gz', 'RunMaterial'], cwd=evt_path)
        shutil.rmtree(pjoin(evt_path, 'RunMaterial'))
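    # Illustrative alternative (not what the code above does): the same
    # archive could be produced with Python's tarfile module instead of
    # shelling out to tar:
    #
    #   import tarfile
    #   with tarfile.open(pjoin(evt_path, 'RunMaterial.tar.gz'), 'w:gz') as tar:
    #       tar.add(pjoin(evt_path, 'RunMaterial'), arcname='RunMaterial')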
    def compile_advanced_stats(self, log_GV_files, all_log_files, message):
        """This function goes through the log files given as arguments and
        compiles statistics about MadLoop stability, virtual integration
        optimization and detection of potential error messages into a nice
        debug message printed at the end of the run"""

        def safe_float(str_float):
            try:
                return float(str_float)
            except ValueError:
                logger.debug('Could not convert the following float during' +
                             ' advanced statistics printout: %s' % str(str_float))
                return -1.0
        # > UPS is a dictionary of counter lists, {channel: [nPS, nUPS, ...]}
        # > Errors is a list of tuples with this format (log_file, nErrors)
        stats = {'UPS': {}, 'Errors': [], 'virt_stats': {}, 'timings': {}}
        mint_search = re.compile(r"MINT(?P<ID>\d*).txt")

        # ==================================
        # == MadLoop stability statistics ==
        # ==================================

        # Recuperate the fraction of unstable PS points found in the runs for
        # the virtuals
        UPS_stat_finder = re.compile(
            r"Satistics from MadLoop:.*" +  # (sic) matches the typo in the MadLoop log output
            r"Total points tried\:\s+(?P<ntot>\d+).*" +
            r"Stability unknown\:\s+(?P<nsun>\d+).*" +
            r"Stable PS point\:\s+(?P<nsps>\d+).*" +
            r"Unstable PS point \(and rescued\)\:\s+(?P<nups>\d+).*" +
            r"Exceptional PS point \(unstable and not rescued\)\:\s+(?P<neps>\d+).*" +
            r"Double precision used\:\s+(?P<nddp>\d+).*" +
            r"Quadruple precision used\:\s+(?P<nqdp>\d+).*" +
            r"Initialization phase\-space points\:\s+(?P<nini>\d+).*" +
            r"Unknown return code \(100\)\:\s+(?P<n100>\d+).*" +
            r"Unknown return code \(10\)\:\s+(?P<n10>\d+).*", re.DOTALL)

        unit_code_meaning = {0: 'Not identified (CTModeRun != -1)',
                             1: 'CutTools (double precision)',
                             2: 'PJFry++',
                             3: 'IREGI',
                             4: 'Golem95',
                             5: 'Samurai',
                             6: 'Ninja (double precision)',
                             7: 'COLLIER',
                             8: 'Ninja (quadruple precision)',
                             9: 'CutTools (quadruple precision)'}
        RetUnit_finder = re.compile(
            r"#Unit\s*(?P<unit>\d+)\s*=\s*(?P<n_occurrences>\d+)")

        for gv_log in log_GV_files:
            channel_name = '/'.join(gv_log.split('/')[-5:-1])
            log = open(gv_log, 'r').read()
            UPS_stats = re.search(UPS_stat_finder, log)
            for retunit_stats in re.finditer(RetUnit_finder, log):
                if channel_name not in list(stats['UPS'].keys()):
                    stats['UPS'][channel_name] = [0]*10 + [[0]*10]
                stats['UPS'][channel_name][10][int(retunit_stats.group('unit'))] \
                    += int(retunit_stats.group('n_occurrences'))
            if not UPS_stats is None:
                try:
                    stats['UPS'][channel_name][0] += int(UPS_stats.group('ntot'))
                    stats['UPS'][channel_name][1] += int(UPS_stats.group('nsun'))
                    stats['UPS'][channel_name][2] += int(UPS_stats.group('nsps'))
                    stats['UPS'][channel_name][3] += int(UPS_stats.group('nups'))
                    stats['UPS'][channel_name][4] += int(UPS_stats.group('neps'))
                    stats['UPS'][channel_name][5] += int(UPS_stats.group('nddp'))
                    stats['UPS'][channel_name][6] += int(UPS_stats.group('nqdp'))
                    stats['UPS'][channel_name][7] += int(UPS_stats.group('nini'))
                    stats['UPS'][channel_name][8] += int(UPS_stats.group('n100'))
                    stats['UPS'][channel_name][9] += int(UPS_stats.group('n10'))
                except KeyError:
                    stats['UPS'][channel_name] = [int(UPS_stats.group('ntot')),
                        int(UPS_stats.group('nsun')), int(UPS_stats.group('nsps')),
                        int(UPS_stats.group('nups')), int(UPS_stats.group('neps')),
                        int(UPS_stats.group('nddp')), int(UPS_stats.group('nqdp')),
                        int(UPS_stats.group('nini')), int(UPS_stats.group('n100')),
                        int(UPS_stats.group('n10')), [0]*10]
        debug_msg = ""
        if len(list(stats['UPS'].keys())) > 0:
            nTotPS  = sum([chan[0] for chan in stats['UPS'].values()], 0)
            nTotsun = sum([chan[1] for chan in stats['UPS'].values()], 0)
            nTotsps = sum([chan[2] for chan in stats['UPS'].values()], 0)
            nTotups = sum([chan[3] for chan in stats['UPS'].values()], 0)
            nToteps = sum([chan[4] for chan in stats['UPS'].values()], 0)
            nTotddp = sum([chan[5] for chan in stats['UPS'].values()], 0)
            nTotqdp = sum([chan[6] for chan in stats['UPS'].values()], 0)
            nTotini = sum([chan[7] for chan in stats['UPS'].values()], 0)
            nTot100 = sum([chan[8] for chan in stats['UPS'].values()], 0)
            nTot10  = sum([chan[9] for chan in stats['UPS'].values()], 0)
            nTot1 = [sum([chan[10][i] for chan in stats['UPS'].values()], 0)
                     for i in range(10)]
            UPSfracs = [(chan[0], 0.0 if chan[1][0] == 0 else
                         safe_float(chan[1][4]*100)/chan[1][0]) for chan in stats['UPS'].items()]
            maxUPS = max(UPSfracs, key=lambda w: w[1])

            tmpStr = ""
            tmpStr += '\n Number of loop ME evaluations (by MadLoop): %d' % nTotPS
            tmpStr += '\n Stability unknown: %d' % nTotsun
            tmpStr += '\n Stable PS point: %d' % nTotsps
            tmpStr += '\n Unstable PS point (and rescued): %d' % nTotups
            tmpStr += '\n Unstable PS point (and not rescued): %d' % nToteps
            tmpStr += '\n Only double precision used: %d' % nTotddp
            tmpStr += '\n Quadruple precision used: %d' % nTotqdp
            tmpStr += '\n Initialization phase-space points: %d' % nTotini
            tmpStr += '\n Reduction methods used:'
            red_methods = [(unit_code_meaning[i], nTot1[i]) for i in
                           unit_code_meaning.keys() if nTot1[i] > 0]
            for method, n in sorted(red_methods, key=lambda l: l[1], reverse=True):
                tmpStr += '\n > %s%s%s' % (method, ' '*(33-len(method)), n)
            if nTot100 != 0:
                debug_msg += '\n Unknown return code (100): %d' % nTot100
            if nTot10 != 0:
                debug_msg += '\n Unknown return code (10): %d' % nTot10
            nUnknownUnit = sum(nTot1[u] for u in range(10) if u
                               not in list(unit_code_meaning.keys()))
            if nUnknownUnit != 0:
                debug_msg += '\n Unknown return code (1): %d' % nUnknownUnit

            if maxUPS[1] > 0.001:
                message += tmpStr
                message += '\n Total number of unstable PS point detected:' + \
                           ' %d (%4.2f%%)' % (nToteps, safe_float(100*nToteps)/nTotPS)
                message += '\n Maximum fraction of UPS points in ' + \
                           'channel %s (%4.2f%%)' % maxUPS
                message += '\n Please report this to the authors while ' + \
                           'providing the file'
                message += '\n %s' % str(pjoin(os.path.dirname(self.me_dir),
                                               maxUPS[0], 'UPS.log'))
            else:
                debug_msg += tmpStr

        # ====================================================
        # == aMC@NLO virtual integration optimization stats ==
        # ====================================================

        virt_tricks_finder = re.compile(
            r"accumulated results Virtual ratio\s*=\s*-?(?P<v_ratio>[\d\+-Eed\.]*)" +
            r"\s*\+/-\s*-?[\d\+-Eed\.]*\s*\(\s*-?(?P<v_ratio_err>[\d\+-Eed\.]*)\s*\%\)\s*\n" +
            r"accumulated results ABS virtual\s*=\s*-?(?P<v_abs_contr>[\d\+-Eed\.]*)" +
            r"\s*\+/-\s*-?[\d\+-Eed\.]*\s*\(\s*-?(?P<v_abs_contr_err>[\d\+-Eed\.]*)\s*\%\)")

        virt_frac_finder = re.compile(r"update virtual fraction to\s*:\s*" +
                                      r"-?(?P<v_frac>[\d\+-Eed\.]*)\s*")

        channel_contr_finder = re.compile(r"Final result \[ABS\]\s*:\s*-?(?P<v_contr>[\d\+-Eed\.]*)")

        channel_contr_list = {}
        for gv_log in log_GV_files:
            logfile = open(gv_log, 'r')
            log = logfile.read()
            logfile.close()
            channel_name = '/'.join(gv_log.split('/')[-3:-1])
            vf_stats = None
            for vf_stats in re.finditer(virt_frac_finder, log):
                pass
            if not vf_stats is None:
                v_frac = safe_float(vf_stats.group('v_frac'))
                ###v_average = safe_float(vf_stats.group('v_average'))
                try:
                    if v_frac < stats['virt_stats']['v_frac_min'][0]:
                        stats['virt_stats']['v_frac_min'] = (v_frac, channel_name)
                    if v_frac > stats['virt_stats']['v_frac_max'][0]:
                        stats['virt_stats']['v_frac_max'] = (v_frac, channel_name)
                    stats['virt_stats']['v_frac_avg'][0] += v_frac
                    stats['virt_stats']['v_frac_avg'][1] += 1
                except KeyError:
                    stats['virt_stats']['v_frac_min'] = [v_frac, channel_name]
                    stats['virt_stats']['v_frac_max'] = [v_frac, channel_name]
                    stats['virt_stats']['v_frac_avg'] = [v_frac, 1]

            ccontr_stats = None
            for ccontr_stats in re.finditer(channel_contr_finder, log):
                pass
            if not ccontr_stats is None:
                contrib = safe_float(ccontr_stats.group('v_contr'))
                try:
                    if contrib > channel_contr_list[channel_name]:
                        channel_contr_list[channel_name] = contrib
                except KeyError:
                    channel_contr_list[channel_name] = contrib

        # Now build the list of relevant virt log files to look for the maxima
        # of the virt fractions and such.
        average_contrib = 0.0
        for value in channel_contr_list.values():
            average_contrib += value
        if len(list(channel_contr_list.values())) != 0:
            average_contrib = average_contrib / len(list(channel_contr_list.values()))

        relevant_log_GV_files = []
        excluded_channels = set([])
        all_channels = set([])
        for log_file in log_GV_files:
            channel_name = '/'.join(log_file.split('/')[-3:-1])
            all_channels.add(channel_name)
            try:
                if channel_contr_list[channel_name] > (0.1*average_contrib):
                    relevant_log_GV_files.append(log_file)
                else:
                    excluded_channels.add(channel_name)
            except KeyError:
                relevant_log_GV_files.append(log_file)

        # Now we want to use the latest occurrence of the accumulated results
        # in the log file
        for gv_log in relevant_log_GV_files:
            logfile = open(gv_log, 'r')
            log = logfile.read()
            logfile.close()
            channel_name = '/'.join(gv_log.split('/')[-3:-1])

            vt_stats = None
            for vt_stats in re.finditer(virt_tricks_finder, log):
                pass
            if not vt_stats is None:
                vt_stats_group = vt_stats.groupdict()
                v_ratio = safe_float(vt_stats.group('v_ratio'))
                v_ratio_err = safe_float(vt_stats.group('v_ratio_err'))
                v_contr = safe_float(vt_stats.group('v_abs_contr'))
                v_contr_err = safe_float(vt_stats.group('v_abs_contr_err'))
                try:
                    if v_ratio < stats['virt_stats']['v_ratio_min'][0]:
                        stats['virt_stats']['v_ratio_min'] = (v_ratio, channel_name)
                    if v_ratio > stats['virt_stats']['v_ratio_max'][0]:
                        stats['virt_stats']['v_ratio_max'] = (v_ratio, channel_name)
                    # compare the error against the stored error (the original
                    # compared v_ratio here, which looks like a slip)
                    if v_ratio_err < stats['virt_stats']['v_ratio_err_min'][0]:
                        stats['virt_stats']['v_ratio_err_min'] = (v_ratio_err, channel_name)
                    if v_ratio_err > stats['virt_stats']['v_ratio_err_max'][0]:
                        stats['virt_stats']['v_ratio_err_max'] = (v_ratio_err, channel_name)
                    if v_contr < stats['virt_stats']['v_contr_min'][0]:
                        stats['virt_stats']['v_contr_min'] = (v_contr, channel_name)
                    if v_contr > stats['virt_stats']['v_contr_max'][0]:
                        stats['virt_stats']['v_contr_max'] = (v_contr, channel_name)
                    if v_contr_err < stats['virt_stats']['v_contr_err_min'][0]:
                        stats['virt_stats']['v_contr_err_min'] = (v_contr_err, channel_name)
                    if v_contr_err > stats['virt_stats']['v_contr_err_max'][0]:
                        stats['virt_stats']['v_contr_err_max'] = (v_contr_err, channel_name)
                except KeyError:
                    stats['virt_stats']['v_ratio_min'] = [v_ratio, channel_name]
                    stats['virt_stats']['v_ratio_max'] = [v_ratio, channel_name]
                    stats['virt_stats']['v_ratio_err_min'] = [v_ratio_err, channel_name]
                    stats['virt_stats']['v_ratio_err_max'] = [v_ratio_err, channel_name]
                    stats['virt_stats']['v_contr_min'] = [v_contr, channel_name]
                    stats['virt_stats']['v_contr_max'] = [v_contr, channel_name]
                    stats['virt_stats']['v_contr_err_min'] = [v_contr_err, channel_name]
                    stats['virt_stats']['v_contr_err_max'] = [v_contr_err, channel_name]

            vf_stats = None
            for vf_stats in re.finditer(virt_frac_finder, log):
                pass
            ##if not vf_stats is None:
            ##    v_frac = safe_float(vf_stats.group('v_frac'))
            ##    v_average = safe_float(vf_stats.group('v_average'))
            ##    try:
            ##        if v_average < stats['virt_stats']['v_average_min'][0]:
            ##            stats['virt_stats']['v_average_min'] = (v_average, channel_name)
            ##        if v_average > stats['virt_stats']['v_average_max'][0]:
            ##            stats['virt_stats']['v_average_max'] = (v_average, channel_name)
            ##        stats['virt_stats']['v_average_avg'][0] += v_average
            ##        stats['virt_stats']['v_average_avg'][1] += 1
            ##    except KeyError:
            ##        stats['virt_stats']['v_average_min'] = [v_average, channel_name]
            ##        stats['virt_stats']['v_average_max'] = [v_average, channel_name]
            ##        stats['virt_stats']['v_average_avg'] = [v_average, 1]

        try:
            debug_msg += '\n\n Statistics on virtual integration optimization : '
            debug_msg += '\n Maximum virt fraction computed %.3f (%s)' \
                         % tuple(stats['virt_stats']['v_frac_max'])
            debug_msg += '\n Minimum virt fraction computed %.3f (%s)' \
                         % tuple(stats['virt_stats']['v_frac_min'])
            debug_msg += '\n Average virt fraction computed %.3f' \
                         % safe_float(stats['virt_stats']['v_frac_avg'][0]/safe_float(stats['virt_stats']['v_frac_avg'][1]))
            debug_msg += '\n Stats below exclude negligible channels (%d excluded out of %d)' % \
                         (len(excluded_channels), len(all_channels))
            ##debug_msg += '\n Maximum virt ratio used %.2f (%s)' \
            ##             % tuple(stats['virt_stats']['v_average_max'])
            debug_msg += '\n Maximum virt ratio found from grids %.2f (%s)' \
                         % tuple(stats['virt_stats']['v_ratio_max'])
            tmpStr = '\n Max. MC err. on virt ratio from grids %.1f %% (%s)' \
                     % tuple(stats['virt_stats']['v_ratio_err_max'])
            debug_msg += tmpStr
            # After all it was decided that it is better not to alarm the user
            # unnecessarily with such a printout of the statistics.
            # if stats['virt_stats']['v_ratio_err_max'][0] > 100.0 or \
            #    stats['virt_stats']['v_ratio_err_max'][0] > 100.0:
            #     message += "\n Suspiciously large MC error in :"
            # if stats['virt_stats']['v_ratio_err_max'][0] > 100.0:
            #     message += tmpStr

            tmpStr = '\n Maximum MC error on abs virt %.1f %% (%s)' \
                     % tuple(stats['virt_stats']['v_contr_err_max'])
            debug_msg += tmpStr
            # if stats['virt_stats']['v_contr_err_max'][0] > 100.0:
            #     message += tmpStr

        except KeyError:
            debug_msg += '\n Could not find statistics on the integration optimization. '

        # =======================================
        # == aMC@NLO timing profile statistics ==
        # =======================================

        timing_stat_finder = re.compile(r"\s*Time spent in\s*(?P<name>\w*)\s*:\s*" +
                                        r"(?P<time>[\d\+-Eed\.]*)\s*")

        for logf in log_GV_files:
            logfile = open(logf, 'r')
            log = logfile.read()
            logfile.close()
            channel_name = '/'.join(logf.split('/')[-3:-1])
            mint = re.search(mint_search, logf)
            if not mint is None:
                channel_name = channel_name + ' [step %s]' % mint.group('ID')

            for time_stats in re.finditer(timing_stat_finder, log):
                try:
                    stats['timings'][time_stats.group('name')][channel_name] += \
                        safe_float(time_stats.group('time'))
                except KeyError:
                    if time_stats.group('name') not in list(stats['timings'].keys()):
                        stats['timings'][time_stats.group('name')] = {}
                    stats['timings'][time_stats.group('name')][channel_name] = \
                        safe_float(time_stats.group('time'))

        # useful inline function
        Tstr = lambda secs: str(datetime.timedelta(seconds=int(secs)))
        try:
            totTimeList = [(time, chan) for chan, time in
                           stats['timings']['Total'].items()]
        except KeyError:
            totTimeList = []

        totTimeList.sort()
        if len(totTimeList) > 0:
            debug_msg += '\n\n Inclusive timing profile :'
            debug_msg += '\n Overall slowest channel %s (%s)' % \
                         (Tstr(totTimeList[-1][0]), totTimeList[-1][1])
            debug_msg += '\n Average channel running time %s' % \
                         Tstr(sum([el[0] for el in totTimeList])/len(totTimeList))
            debug_msg += '\n Aggregated total running time %s' % \
                         Tstr(sum([el[0] for el in totTimeList]))
        else:
            debug_msg += '\n\n Inclusive timing profile not available.'

        sorted_keys = sorted(list(stats['timings'].keys()), key=lambda stat:
                             sum(stats['timings'][stat].values()), reverse=True)
        for name in sorted_keys:
            if name == 'Total':
                continue
            if sum(stats['timings'][name].values()) <= 0.0:
                debug_msg += '\n Zero time record for %s.' % name
                continue
            try:
                TimeList = [((100.0*time/stats['timings']['Total'][chan]),
                             chan) for chan, time in stats['timings'][name].items()]
            except (KeyError, ZeroDivisionError):
                # the original used 'except KeyError as ZeroDivisionError',
                # which only caught KeyError; catch both exception types
                debug_msg += '\n\n Timing profile for %s unavailable.' % name
                continue
            TimeList.sort()
            debug_msg += '\n Timing profile for <%s> :' % name
            try:
                debug_msg += '\n Overall fraction of time %.3f %%' % \
                             safe_float((100.0*(sum(stats['timings'][name].values()) /
                                                sum(stats['timings']['Total'].values()))))
            except (KeyError, ZeroDivisionError):
                debug_msg += '\n Overall fraction of time unavailable.'
            debug_msg += '\n Largest fraction of time %.3f %% (%s)' % \
                         (TimeList[-1][0], TimeList[-1][1])
            debug_msg += '\n Smallest fraction of time %.3f %% (%s)' % \
                         (TimeList[0][0], TimeList[0][1])

        # ==============================
        # == log file error detection ==
        # ==============================

        # Find the number of potential errors found in all log files.
        # This re is a simple match on a case-insensitive 'error', but with a
        # veto excluding the sentence
        # "See Section 6 of paper for error calculation."
        # which appears in the LHAPDF header in the logs.
        err_finder = re.compile(
            r"(?<!of\spaper\sfor\s)\bERROR\b(?!\scalculation\.)", re.IGNORECASE)
        for log in all_log_files:
            logfile = open(log, 'r')
            nErrors = len(re.findall(err_finder, logfile.read()))
            logfile.close()
            if nErrors != 0:
                stats['Errors'].append((str(log), nErrors))

        nErrors = sum([err[1] for err in stats['Errors']], 0)
        if nErrors != 0:
            debug_msg += '\n WARNING:: A total of %d error%s ha%s been ' \
                         % (nErrors, 's' if nErrors > 1 else '', 've' if nErrors > 1 else 's') + \
                         'found in the following log file%s:' % ('s' if
                         len(stats['Errors']) > 1 else '')
            for error in stats['Errors'][:3]:
                log_name = '/'.join(error[0].split('/')[-5:])
                debug_msg += '\n > %d error%s in %s' % \
                             (error[1], 's' if error[1] > 1 else '', log_name)
            if len(stats['Errors']) > 3:
                nRemainingErrors = sum([err[1] for err in stats['Errors']][3:], 0)
                nRemainingLogs = len(stats['Errors']) - 3
                debug_msg += '\n And another %d error%s in %d other log file%s' % \
                             (nRemainingErrors, 's' if nRemainingErrors > 1 else '',
                              nRemainingLogs, 's ' if nRemainingLogs > 1 else '')

        return message, debug_msg
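    # Illustrative sketch: the look-behind/look-ahead veto in err_finder
    # skips the LHAPDF header sentence while still counting real errors:
    #
    #   import re
    #   err_finder = re.compile(
    #       r"(?<!of\spaper\sfor\s)\bERROR\b(?!\scalculation\.)", re.IGNORECASE)
    #   text = ("See Section 6 of paper for error calculation.\n"
    #           "ERROR: something went wrong")
    #   print(len(re.findall(err_finder, text)))   # -> 1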
    def reweight_and_collect_events(self, options, mode, nevents, event_norm):
        """this function calls the reweighting routines and creates the event file in the
        Event dir. Returns the name of the event file created
        """
        scale_pdf_info = []
        if any(self.run_card['reweight_scale']) or any(self.run_card['reweight_PDF']) or \
           len(self.run_card['dynamical_scale_choice']) > 1 or len(self.run_card['lhaid']) > 1 \
           or self.run_card['store_rwgt_info']:
            scale_pdf_info = self.run_reweight(options['reweightonly'])
        self.update_status('Collecting events', level='parton', update_results=True)
        misc.compile(['collect_events'],
                     cwd=pjoin(self.me_dir, 'SubProcesses'), nocompile=options['nocompile'])
        p = misc.Popen(['./collect_events'], cwd=pjoin(self.me_dir, 'SubProcesses'),
                       stdin=subprocess.PIPE,
                       stdout=open(pjoin(self.me_dir, 'collect_events.log'), 'w'))
        if event_norm.lower() == 'sum':
            p.communicate(input='1\n'.encode())
        elif event_norm.lower() == 'unity':
            p.communicate(input='3\n'.encode())
        elif event_norm.lower() == 'bias':
            p.communicate(input='0\n'.encode())
        else:
            p.communicate(input='2\n'.encode())

        # get the filename from collect_events
        filename = open(pjoin(self.me_dir, 'collect_events.log')).read().split()[-1]

        if not os.path.exists(pjoin(self.me_dir, 'SubProcesses', filename)):
            raise aMCatNLOError('An error occurred during event generation. ' +
                                'The event file has not been created. Check collect_events.log')
        evt_file = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')
        misc.gzip(pjoin(self.me_dir, 'SubProcesses', filename), stdout=evt_file)
        if not options['reweightonly']:
            self.print_summary(options, 2, mode, scale_pdf_info)
        res_files = misc.glob('res*.txt', pjoin(self.me_dir, 'SubProcesses'))
        for res_file in res_files:
            files.mv(res_file, pjoin(self.me_dir, 'Events', self.run_name))

        logger.info('The %s file has been generated.\n' % (evt_file))
        self.results.add_detail('nb_event', nevents)
        self.update_status('Events generated', level='parton', update_results=True)
        return evt_file[:-3]
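    # Illustrative sketch: collect_events is driven through stdin, with a
    # single digit selecting the event normalisation. A standalone equivalent
    # of the pattern, using a trivial stand-in command:
    #
    #   import subprocess
    #   p = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
    #                        stdout=subprocess.PIPE)
    #   out, _ = p.communicate(input='1\n'.encode())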
    def run_mcatnlo(self, evt_file, options):
        """runs mcatnlo on the generated event file, to produce showered events"""
        logger.info('Preparing MCatNLO run')
        try:
            misc.gunzip(evt_file)
        except Exception:
            pass

        self.banner = banner_mod.Banner(evt_file)
        shower = self.banner.get_detail('run_card', 'parton_shower').upper()

        # check that the number of split event files divides the number of
        # events, otherwise set it to 1
        if int(self.banner.get_detail('run_card', 'nevents') /
               self.shower_card['nsplit_jobs']) * self.shower_card['nsplit_jobs'] \
               != self.banner.get_detail('run_card', 'nevents'):
            logger.warning(
                'nsplit_jobs in the shower card is not a divisor of the number of events.\n' +
                'Setting it to 1.')
            self.shower_card['nsplit_jobs'] = 1

        # don't split the jobs if the user asks to shower only a part of the events
        if self.shower_card['nevents'] > 0 and \
           self.shower_card['nevents'] < self.banner.get_detail('run_card', 'nevents') and \
           self.shower_card['nsplit_jobs'] != 1:
            logger.warning(
                'Only a part of the events will be showered.\n' +
                'Setting nsplit_jobs in the shower_card to 1.')
            self.shower_card['nsplit_jobs'] = 1

        self.banner_to_mcatnlo(evt_file)

        # if fastjet has to be linked (in extralibs) then add the lib/include
        # dirs for fastjet if fastjet-config is present on the system,
        # otherwise add fjcore to the files to combine
        if 'fastjet' in self.shower_card['extralibs']:
            # first, check that stdc++ is also linked
            if not 'stdc++' in self.shower_card['extralibs']:
                logger.warning('Linking FastJet: adding stdc++ to EXTRALIBS')
                self.shower_card['extralibs'] += ' stdc++'
            # then check if options['fastjet'] corresponds to a valid fj installation
            try:
                # this is for a complete fj installation
                p = subprocess.Popen([self.options['fastjet'], '--prefix'],
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                output, error = p.communicate()
                # remove the line break from output (last character)
                output = output.decode()[:-1]
                # add the lib/include paths
                if not pjoin(output, 'lib') in self.shower_card['extrapaths']:
                    logger.warning('Linking FastJet: updating EXTRAPATHS')
                    self.shower_card['extrapaths'] += ' ' + pjoin(output, 'lib')
                if not pjoin(output, 'include') in self.shower_card['includepaths']:
                    logger.warning('Linking FastJet: updating INCLUDEPATHS')
                    self.shower_card['includepaths'] += ' ' + pjoin(output, 'include')
                # to be changed in the fortran wrapper
                include_line = '#include "fastjet/ClusterSequence.hh"//INCLUDE_FJ'
                namespace_line = 'namespace fj = fastjet;//NAMESPACE_FJ'
            except Exception:
                logger.warning('Linking FastJet: using fjcore')
                # this is for FJcore, so no FJ library has to be linked
                self.shower_card['extralibs'] = self.shower_card['extralibs'].replace('fastjet', '')
                if not 'fjcore.o' in self.shower_card['analyse']:
                    self.shower_card['analyse'] += ' fjcore.o'
                # to be changed in the fortran wrapper
                include_line = '#include "fjcore.hh"//INCLUDE_FJ'
                namespace_line = 'namespace fj = fjcore;//NAMESPACE_FJ'
            # change the fortran wrapper with the correct namespaces/include
            fjwrapper_lines = open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc')).read().split('\n')
            for line in fjwrapper_lines:
                if '//INCLUDE_FJ' in line:
                    fjwrapper_lines[fjwrapper_lines.index(line)] = include_line
                if '//NAMESPACE_FJ' in line:
                    fjwrapper_lines[fjwrapper_lines.index(line)] = namespace_line
            with open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc'), 'w') as fsock:
                fsock.write('\n'.join(fjwrapper_lines) + '\n')

        extrapaths = self.shower_card['extrapaths'].split()

        # check that the paths needed by HW++ and PY8 are set if one uses these showers
        if shower in ['HERWIGPP', 'PYTHIA8']:
            path_dict = {'HERWIGPP': ['hepmc_path',
                                      'thepeg_path',
                                      'hwpp_path'],
                         'PYTHIA8': ['pythia8_path']}

            if not all([self.options[ppath] and os.path.exists(self.options[ppath]) for ppath in path_dict[shower]]):
                raise aMCatNLOError('Some paths are missing or invalid in the configuration file.\n' +
                                    ('Please make sure you have set these variables: %s' % ', '.join(path_dict[shower])))

        if shower == 'HERWIGPP':
            extrapaths.append(pjoin(self.options['hepmc_path'], 'lib'))
            self.shower_card['extrapaths'] += ' %s' % pjoin(self.options['hepmc_path'], 'lib')

        # add the HepMC path of the pythia8 installation
        if shower == 'PYTHIA8':
            hepmc = subprocess.Popen([pjoin(self.options['pythia8_path'], 'bin', 'pythia8-config'), '--hepmc2'],
                                     stdout=subprocess.PIPE).stdout.read().decode().strip()
            # this gives all the flags, i.e.
            # -I/Path/to/HepMC/include -L/Path/to/HepMC/lib -lHepMC
            # we just need the path to the HepMC libraries
            extrapaths.append(hepmc.split()[1].replace('-L', ''))

        if shower == 'PYTHIA8' and not os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')):
            extrapaths.append(pjoin(self.options['pythia8_path'], 'lib'))

        # HwU.f now contains some calls to the PineAPPL functions. One should add the
        # dummy interface to have them linked
        if "HwU.o" in self.shower_card['analyse'] and \
           not "pineappl_interface_dummy.o" in self.shower_card['analyse']:
            self.shower_card['analyse'] += " pineappl_interface_dummy.o"

        # set the PATH for the dynamic libraries
        if sys.platform == 'darwin':
            ld_library_path = 'DYLD_LIBRARY_PATH'
        else:
            ld_library_path = 'LD_LIBRARY_PATH'
        if ld_library_path in list(os.environ.keys()):
            paths = os.environ[ld_library_path]
        else:
            paths = ''
        paths += ':' + ':'.join(extrapaths)
        os.putenv(ld_library_path, paths)

        shower_card_path = pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat')
        self.shower_card.write_card(shower, shower_card_path)

        # overwrite if shower_card_set.dat exists in MCatNLO
        if os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat')):
            files.mv(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat'),
                     pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat'))

        mcatnlo_log = pjoin(self.me_dir, 'mcatnlo.log')
        self.update_status('Compiling MCatNLO for %s...' % shower, level='shower')

        # libdl may be needed for pythia 82xx
        #if shower == 'PYTHIA8' and not \
        #    os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')) and \
        #    'dl' not in self.shower_card['extralibs'].split():
        #    # 'dl' has to be linked with the extralibs
        #    self.shower_card['extralibs'] += ' dl'
        #    logger.warning("'dl' was added to extralibs from the shower_card.dat.\n" + \
        #                   "It is needed for the correct running of PY8.2xx.\n" + \
        #                   "If this library cannot be found on your system, a crash will occur.")

        misc.call(['./MCatNLO_MadFKS.inputs'], stdout=open(mcatnlo_log, 'w'),
                  stderr=open(mcatnlo_log, 'w'),
                  cwd=pjoin(self.me_dir, 'MCatNLO'),
                  close_fds=True)

        exe = 'MCATNLO_%s_EXE' % shower
        if not os.path.exists(pjoin(self.me_dir, 'MCatNLO', exe)) and \
           not os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe')):
            print(open(mcatnlo_log).read())
            raise aMCatNLOError('Compilation failed, check %s for details' % mcatnlo_log)
        logger.info(' ... done')

        # create an empty dir where to run
        count = 1
        while os.path.isdir(pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' %
                                  (shower, count))):
            count += 1
        rundir = pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' % (shower, count))
        os.mkdir(rundir)
        files.cp(shower_card_path, rundir)

        # look for the event files (don't re-split if one asks for the
        # same number of event files as in the previous run)
        event_files = misc.glob('events_*.lhe', pjoin(self.me_dir, 'Events', self.run_name))
        if max(len(event_files), 1) != self.shower_card['nsplit_jobs']:
            logger.info('Cleaning old files and splitting the event file...')
            # clean the old files
            files.rm([f for f in event_files if 'events.lhe' not in f])
            if self.shower_card['nsplit_jobs'] > 1:
                misc.compile(['split_events'], cwd=pjoin(self.me_dir, 'Utilities'), nocompile=options['nocompile'])
                p = misc.Popen([pjoin(self.me_dir, 'Utilities', 'split_events')],
                               stdin=subprocess.PIPE,
                               stdout=open(pjoin(self.me_dir, 'Events', self.run_name, 'split_events.log'), 'w'),
                               cwd=pjoin(self.me_dir, 'Events', self.run_name))
                p.communicate(input=('events.lhe\n%d\n' % self.shower_card['nsplit_jobs']).encode())
                logger.info('Splitting done.')
            event_files = misc.glob('events_*.lhe', pjoin(self.me_dir, 'Events', self.run_name))

        event_files.sort()

        self.update_status('Showering events...', level='shower')
        logger.info('(Running in %s)' % rundir)
        if shower != 'PYTHIA8':
            files.mv(pjoin(self.me_dir, 'MCatNLO', exe), rundir)
            files.mv(pjoin(self.me_dir, 'MCatNLO', 'MCATNLO_%s_input' % shower), rundir)
        else:
            # special treatment for pythia8
            files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.cmd'), rundir)
            files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe'), rundir)
            if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')):  # this is PY8.1xxx
                files.ln(pjoin(self.options['pythia8_path'], 'examples', 'config.sh'), rundir)
                files.ln(pjoin(self.options['pythia8_path'], 'xmldoc'), rundir)
            else:  # this is PY8.2xxx
                files.ln(pjoin(self.options['pythia8_path'], 'share/Pythia8/xmldoc'), rundir)
        # link the hwpp exe in the rundir
        if shower == 'HERWIGPP':
            try:
                if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++')):
                    files.ln(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++'), rundir)
                if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig')):
                    files.ln(pjoin(self.options['hwpp_path'], 'bin', 'Herwig'), rundir)
            except Exception:
                raise aMCatNLOError('The Herwig++ path set in the configuration file is not valid.')

            if os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'HWPPAnalyzer', 'HepMCFortran.so')):
                files.cp(pjoin(self.me_dir, 'MCatNLO', 'HWPPAnalyzer', 'HepMCFortran.so'), rundir)

        files.ln(evt_file, rundir, 'events.lhe')
        for i, f in enumerate(event_files):
            files.ln(f, rundir, 'events_%d.lhe' % (i + 1))

        if not self.shower_card['analyse']:
            # a hep/hepmc file as output
            out_id = 'HEP'
        else:
            # one or more .top file(s) as output
            if "HwU" in self.shower_card['analyse']:
                out_id = 'HWU'
            else:
                out_id = 'TOP'

        # write the executable
        with open(pjoin(rundir, 'shower.sh'), 'w') as fsock:
            # set the PATH for the dynamic libraries
            if sys.platform == 'darwin':
                ld_library_path = 'DYLD_LIBRARY_PATH'
            else:
                ld_library_path = 'LD_LIBRARY_PATH'
            fsock.write(open(pjoin(self.me_dir, 'MCatNLO', 'shower_template.sh')).read()
                        % {'ld_library_path': ld_library_path,
                           'extralibs': ':'.join(extrapaths)})
        subprocess.call(['chmod', '+x', pjoin(rundir, 'shower.sh')])

        if event_files:
            arg_list = [[shower, out_id, self.run_name, '%d' % (i + 1)]
                        for i in range(len(event_files))]
        else:
            arg_list = [[shower, out_id, self.run_name]]

        self.run_all({rundir: 'shower.sh'}, arg_list, 'shower')
        self.njobs = 1
        self.wait_for_complete('shower')

        # now collect the results
        message = ''
        warning = ''
        to_gzip = [evt_file]
        if out_id == 'HEP':
            # copy the showered stdhep/hepmc file back into events
            if shower in ['PYTHIA8', 'HERWIGPP']:
                hep_format = 'HEPMC'
                ext = 'hepmc'
            else:
                hep_format = 'StdHEP'
                ext = 'hep'

            hep_file = '%s_%s_0.%s.gz' % \
                       (pjoin(os.path.dirname(evt_file), 'events'), shower, ext)
            count = 0

            # find the first available name for the output:
            # check existing results with or without event splitting
            while os.path.exists(hep_file) or \
                  os.path.exists(hep_file.replace('.%s.gz' % ext, '__1.%s.gz' % ext)):
                count += 1
                hep_file = '%s_%s_%d.%s.gz' % \
                           (pjoin(os.path.dirname(evt_file), 'events'), shower, count, ext)

            try:
                if self.shower_card['nsplit_jobs'] == 1:
                    files.mv(os.path.join(rundir, 'events.%s.gz' % ext), hep_file)
                    message = ('The file %s has been generated.\nIt contains showered' +
                               ' and hadronized events in the %s format obtained by' +
                               ' showering the parton-level event file %s.gz with %s') % \
                              (hep_file, hep_format, evt_file, shower)
                else:
                    hep_list = []
                    for i in range(self.shower_card['nsplit_jobs']):
                        hep_list.append(hep_file.replace('.%s.gz' % ext, '__%d.%s.gz' % (i + 1, ext)))
                        files.mv(os.path.join(rundir, 'events_%d.%s.gz' % (i + 1, ext)), hep_list[-1])
                    message = ('The following files have been generated:\n %s\nThey contain showered' +
                               ' and hadronized events in the %s format obtained by' +
                               ' showering the (split) parton-level event file %s.gz with %s') % \
                              ('\n '.join(hep_list), hep_format, evt_file, shower)

            except (OSError, IOError):
                # the original used 'except OSError as IOError', which only
                # caught OSError; catch both exception types
                raise aMCatNLOError('No file has been generated, an error occurred.' +
                                    ' More information in %s' % pjoin(os.getcwd(), 'amcatnlo_run.log'))

            # run the plot creation in a secure way
            if hep_format == 'StdHEP':
                try:
                    self.do_plot('%s -f' % self.run_name)
                except Exception as error:
                    logger.info("Fail to make the plot. Continue...")
                    pass

        elif out_id == 'TOP' or out_id == 'HWU':
            # copy the topdrawer or HwU file(s) back into events
            if out_id == 'TOP':
                ext = 'top'
            elif out_id == 'HWU':
                ext = 'HwU'
            topfiles = []
            top_tars = [tarfile.TarFile(f) for f in misc.glob('histfile*.tar', rundir)]
            for top_tar in top_tars:
                topfiles.extend(top_tar.getnames())

            # safety check
            if len(top_tars) != self.shower_card['nsplit_jobs']:
                raise aMCatNLOError('%d job(s) expected, %d file(s) found' %
                                    (self.shower_card['nsplit_jobs'], len(top_tars)))

            # find the first available name for the output:
            # check existing results with or without event splitting
            filename = 'plot_%s_%d_' % (shower, 1)
            count = 1
            while os.path.exists(pjoin(self.me_dir, 'Events',
                                       self.run_name, '%s0.%s' % (filename, ext))) or \
                  os.path.exists(pjoin(self.me_dir, 'Events',
                                       self.run_name, '%s0__1.%s' % (filename, ext))):
                count += 1
                filename = 'plot_%s_%d_' % (shower, count)

            if out_id == 'TOP':
                hist_format = 'TopDrawer format'
            elif out_id == 'HWU':
                hist_format = 'HwU and GnuPlot formats'

            if not topfiles:
                # if no topfiles are found just warn the user
                warning = 'No .top file has been generated. For the results of your ' + \
                          'run, please check inside %s' % rundir
            elif self.shower_card['nsplit_jobs'] == 1:
                # only one job for the shower
                top_tars[0].extractall(path=rundir)
                plotfiles = []
                for i, file in enumerate(topfiles):
                    if out_id == 'TOP':
                        plotfile = pjoin(self.me_dir, 'Events', self.run_name,
                                         '%s%d.top' % (filename, i))
                        files.mv(pjoin(rundir, file), plotfile)
                    elif out_id == 'HWU':
                        out = pjoin(self.me_dir, 'Events',
                                    self.run_name, '%s%d' % (filename, i))
                        histos = [{'dirname': pjoin(rundir, file)}]
                        self.combine_plots_HwU(histos, out)
                        try:
                            misc.call(['gnuplot', '%s%d.gnuplot' % (filename, i)],
                                      stdout=os.open(os.devnull, os.O_RDWR),
                                      stderr=os.open(os.devnull, os.O_RDWR),
                                      cwd=pjoin(self.me_dir, 'Events', self.run_name))
                        except Exception:
                            pass
                        plotfile = pjoin(self.me_dir, 'Events', self.run_name,
                                         '%s%d.HwU' % (filename, i))
                    plotfiles.append(plotfile)

                ffiles = 'files'
                have = 'have'
                if len(plotfiles) == 1:
                    ffiles = 'file'
                    have = 'has'

                message = ('The %s %s %s been generated, with histograms in the' +
                           ' %s, obtained by showering the parton-level' +
                           ' file %s.gz with %s.') % (ffiles, ', '.join(plotfiles), have,
                                                      hist_format, evt_file, shower)
            else:
                # many jobs for the shower have been run
                topfiles_set = set(topfiles)
                plotfiles = []
                for j, top_tar in enumerate(top_tars):
                    top_tar.extractall(path=rundir)
                    for i, file in enumerate(topfiles_set):
                        plotfile = pjoin(self.me_dir, 'Events', self.run_name,
                                         '%s%d__%d.%s' % (filename, i, j + 1, ext))
                        files.mv(pjoin(rundir, file), plotfile)
                        plotfiles.append(plotfile)

                # check if the user asked to combine the .top into a single file
                if self.shower_card['combine_td']:
                    misc.compile(['sum_plots'], cwd=pjoin(self.me_dir, 'Utilities'))

                    if self.banner.get('run_card', 'event_norm').lower() == 'sum':
                        norm = 1.
                    else:
                        norm = 1./float(self.shower_card['nsplit_jobs'])

                    plotfiles2 = []
                    for i, file in enumerate(topfiles_set):
                        filelist = ['%s%d__%d.%s' % (filename, i, j + 1, ext)
                                    for j in range(self.shower_card['nsplit_jobs'])]
                        if out_id == 'TOP':
                            infile = "%d\n%s\n%s\n" % \
                                     (self.shower_card['nsplit_jobs'],
                                      '\n'.join(filelist),
                                      '\n'.join([str(norm)] * self.shower_card['nsplit_jobs']))
                            p = misc.Popen([pjoin(self.me_dir, 'Utilities', 'sum_plots')],
                                           stdin=subprocess.PIPE,
                                           stdout=os.open(os.devnull, os.O_RDWR),
                                           cwd=pjoin(self.me_dir, 'Events', self.run_name))
                            p.communicate(input=infile.encode())
                            files.mv(pjoin(self.me_dir, 'Events', self.run_name, 'sum.top'),
                                     pjoin(self.me_dir, 'Events', self.run_name, '%s%d.top' % (filename, i)))
                        elif out_id == 'HWU':
                            out = pjoin(self.me_dir, 'Events',
                                        self.run_name, '%s%d' % (filename, i))
                            histos = []
                            norms = []
                            for plotfile in plotfiles:
                                histos.append({'dirname': plotfile})
                                norms.append(norm)
                            self.combine_plots_HwU(histos, out, normalisation=norms)
                            try:
                                misc.call(['gnuplot', '%s%d.gnuplot' % (filename, i)],
                                          stdout=os.open(os.devnull, os.O_RDWR),
                                          stderr=os.open(os.devnull, os.O_RDWR),
                                          cwd=pjoin(self.me_dir, 'Events', self.run_name))
                            except Exception:
                                pass

                        plotfiles2.append(pjoin(self.me_dir, 'Events', self.run_name, '%s%d.%s' % (filename, i, ext)))
                        tar = tarfile.open(
                            pjoin(self.me_dir, 'Events', self.run_name, '%s%d.tar.gz' % (filename, i)), 'w:gz')
                        for f in filelist:
                            tar.add(pjoin(self.me_dir, 'Events', self.run_name, f), arcname=f)
                        files.rm([pjoin(self.me_dir, 'Events', self.run_name, f) for f in filelist])
                        tar.close()

                    ffiles = 'files'
                    have = 'have'
                    if len(plotfiles2) == 1:
                        ffiles = 'file'
                        have = 'has'

                    message = ('The %s %s %s been generated, with histograms in the' +
                               ' %s, obtained by showering the parton-level' +
                               ' file %s.gz with %s.\n' +
                               'The files from the different shower ' +
                               'jobs (before combining them) can be found inside %s.') % \
                              (ffiles, ', '.join(plotfiles2), have, hist_format,
                               evt_file, shower,
                               ', '.join([f.replace('%s' % ext, 'tar.gz') for f in plotfiles2]))

                else:
                    message = ('The following files have been generated:\n %s\n' +
                               'They contain histograms in the' +
                               ' %s, obtained by showering the parton-level' +
                               ' file %s.gz with %s.') % ('\n '.join(plotfiles),
                                                          hist_format, evt_file, shower)

        # Now archive the shower card used if RunMaterial is present
        run_dir_path = pjoin(rundir, self.run_name)
        if os.path.exists(pjoin(run_dir_path, 'RunMaterial.tar.gz')):
            misc.call(['tar', '-xzpf', 'RunMaterial.tar.gz'], cwd=run_dir_path)
            files.cp(pjoin(self.me_dir, 'Cards', 'shower_card.dat'),
                     pjoin(run_dir_path, 'RunMaterial', 'shower_card_for_%s_%d.dat'
                           % (shower, count)))
            misc.call(['tar', '-czpf', 'RunMaterial.tar.gz', 'RunMaterial'],
                      cwd=run_dir_path)
            shutil.rmtree(pjoin(run_dir_path, 'RunMaterial'))

        if self.run_card['ickkw'] > 0:
            if self.run_card['ickkw'] != 3 or shower != 'PYTHIA8':
                logger.warning("Merged cross-section not retrieved by MadGraph. "
                               "Please check the parton-shower log to get the correct cross-section after merging")
            else:
                pythia_log = misc.BackRead(pjoin(rundir, "mcatnlo_run.log"))

                pythiare = re.compile(r"\s*Les Houches User Process\(es\)\s+9999\s*\|\s*(?P<generated>\d+)\s+(?P<tried>\d+)\s+(?P<accepted>\d+)\s*\|\s*(?P<xsec>[\d\.DeE\-+]+)\s+(?P<xerr>[\d\.DeE\-+]+)\s*\|")
                # | Les Houches User Process(es) 9999 | 10000 10000 7115 | 1.120e-04 0.000e+00 |

                for line in pythia_log:
                    info = pythiare.search(line)
                    if not info:
                        continue
                    try:
                        # Pythia cross section in mb, we want pb
                        sigma_m = float(info.group('xsec').replace('D', 'E')) * 1e9
                        sigma_err = float(info.group('xerr').replace('D', 'E')) * 1e9
                        Nacc = int(info.group('accepted'))
                        #Ntry = int(info.group('tried'))
                    except Exception:
                        logger.warning("Merged cross-section not retrieved by MadGraph. "
                                       "Please check the parton-shower log to get the correct cross-section after merging")
                        break

                    self.results.add_detail('cross_pythia', sigma_m)
                    self.results.add_detail('nb_event_pythia', Nacc)
                    self.results.add_detail('error_pythia', sigma_err)
                    self.results.add_detail('shower_dir', os.path.basename(rundir))
                    logger.info("\nFxFx Cross-Section:\n"
                                "======================\n"
                                " %f pb.\n"
                                " Number of events after merging: %s\n", sigma_m, Nacc, '$MG:BOLD')
                    break
                else:
                    logger.warning("Merged cross-section not retrieved by MadGraph. "
                                   "Please check the parton-shower log to get the correct cross-section after merging")

        # end of the run: gzip files and print out the message/warning
        for f in to_gzip:
            misc.gzip(f)
        if message:
            logger.info(message)
        if warning:
            logger.warning(warning)

        self.update_status('Run complete', level='shower', update_results=True)
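    # Illustrative sketch: the Pythia8 merged cross section is recovered by
    # matching the 'Les Houches User Process(es)' statistics line quoted in
    # the comment above, then converting from mb to pb:
    #
    #   import re
    #   line = '| Les Houches User Process(es) 9999 | 10000 10000 7115 | 1.120e-04 0.000e+00 |'
    #   m = re.search(r"Les Houches User Process\(es\)\s+9999\s*\|\s*(\d+)\s+(\d+)\s+(\d+)"
    #                 r"\s*\|\s*([\d\.DeE\-+]+)\s+([\d\.DeE\-+]+)", line)
    #   sigma_pb = float(m.group(4).replace('D', 'E')) * 1e9   # -> 112000.0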
    ############################################################################
    def set_run_name(self, name, tag=None, level='parton', reload_card=False, **opts):
        """define the run name, the run_tag, the banner and the results."""

        # levels for which we are forced to change the tag
        # (new run level: previous levels requiring changes)
        upgrade_tag = {'parton': ['parton', 'delphes', 'shower', 'madanalysis5_hadron'],
                       'shower': ['shower', 'delphes', 'madanalysis5_hadron'],
                       'delphes': ['delphes'],
                       'madanalysis5_hadron': ['madanalysis5_hadron'],
                       'plot': []}

        if name == self.run_name:
            if reload_card:
                run_card = pjoin(self.me_dir, 'Cards', 'run_card.dat')
                self.run_card = banner_mod.RunCardNLO(run_card)

            # check if we need to change the tag
            if tag:
                self.run_card['run_tag'] = tag
                self.run_tag = tag
                self.results.add_run(self.run_name, self.run_card)
            else:
                for tag in upgrade_tag[level]:
                    if getattr(self.results[self.run_name][-1], tag):
                        tag = self.get_available_tag()
                        self.run_card['run_tag'] = tag
                        self.run_tag = tag
                        self.results.add_run(self.run_name, self.run_card)
                        break
            return  # Nothing else to do

        # save/clean the previous run
        if self.run_name:
            self.store_result()
        # store the new name
        self.run_name = name

        # Read the run_card
        run_card = pjoin(self.me_dir, 'Cards', 'run_card.dat')
        self.run_card = banner_mod.RunCardNLO(run_card)

        new_tag = False
        # First call for this run -> set the banner
        self.banner = banner_mod.recover_banner(self.results, level, self.run_name, tag)
        if 'mgruncard' in self.banner:
            self.run_card = self.banner.charge_card('run_card')
        if tag:
            self.run_card['run_tag'] = tag
            new_tag = True
        elif not self.run_name in self.results and level == 'parton':
            pass  # No results yet, so the current tag is fine
        elif not self.run_name in self.results:
            # This is only for the case when you want to trick the interface
            logger.warning('Trying to run data on an unknown run.')
            self.results.add_run(name, self.run_card)
            self.results.update('add run %s' % name, 'all', makehtml=True)
        else:
            for tag in upgrade_tag[level]:
                if getattr(self.results[self.run_name][-1], tag):
                    # LEVEL is already defined in the last tag -> need to switch tag
                    tag = self.get_available_tag()
                    self.run_card['run_tag'] = tag
                    new_tag = True
                    break
            if not new_tag:
                # We can add the results to the current run
                tag = self.results[self.run_name][-1]['tag']
                self.run_card['run_tag'] = tag  # ensure that run_tag is correct

        if name in self.results and not new_tag:
            self.results.def_current(self.run_name)
        else:
            self.results.add_run(self.run_name, self.run_card)

        self.run_tag = self.run_card['run_tag']

        # Return the tag of the previous run having the data required for this
        # tag/run to work well.
        if level == 'parton':
            return
        elif level == 'pythia':
            return self.results[self.run_name][0]['tag']
        else:
            for i in range(-1, -len(self.results[self.run_name])-1, -1):
                tagRun = self.results[self.run_name][i]
                if tagRun.pythia:
                    return tagRun['tag']
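
The upgrade_tag table above encodes which result levels a rerun would overwrite: if any of the listed attributes is already filled in the latest tag, a fresh tag is allocated. A toy illustration (FakeTag is a hypothetical stand-in for the real results container):

    upgrade_tag = {'parton': ['parton', 'delphes', 'shower', 'madanalysis5_hadron'],
                   'shower': ['shower', 'delphes', 'madanalysis5_hadron']}

    class FakeTag(object):
        parton, shower, delphes, madanalysis5_hadron = True, True, False, False

    needs_new_tag = any(getattr(FakeTag, attr) for attr in upgrade_tag['shower'])
    # True: shower-level data already exists, so rerunning at the shower level
    # must not overwrite it and a new tag is picked with get_available_tag().
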
    def store_result(self):
        """tar the pythia results. This is done when we are quite sure that
        the pythia output will not be used anymore."""

        if not self.run_name:
            return

        self.results.save()

        if not self.to_store:
            return

        if 'event' in self.to_store:
            if os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')):
                if not os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')):
                    self.update_status('gzipping output file: events.lhe', level='parton', error=True)
                    misc.gzip(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe'))
                else:
                    os.remove(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe'))
            if os.path.exists(pjoin(self.me_dir, 'Events', 'reweight.lhe')):
                os.remove(pjoin(self.me_dir, 'Events', 'reweight.lhe'))

        tag = self.run_card['run_tag']

        self.to_store = []
    ############################################################################
    def get_Gdir(self, Pdir=None):
        """get the list of G directories, caching it the first time it is built."""

        if hasattr(self, "Gdirs"):
            if self.me_dir in self.Gdirs:
                if Pdir is None:
                    # start the sum from an empty list, since we are summing lists
                    return sum(self.Gdirs.values(), [])
                else:
                    return self.Gdirs[Pdir]

        Pdirs = self.get_Pdir()
        Gdirs = {self.me_dir: []}
        for P in Pdirs:
            Gdirs[P] = [pjoin(P, G) for G in os.listdir(P) if G.startswith('G') and
                        os.path.isdir(pjoin(P, G))]

        self.Gdirs = Gdirs
        return self.get_Gdir(Pdir)
    def get_init_dict(self, evt_file):
        """reads the info in the init block and returns it in a dictionary"""
        ev_file = open(evt_file)
        init = ""
        found = False
        while True:
            line = ev_file.readline()
            if "<init>" in line:
                found = True
            elif found and not line.startswith('#'):
                init += line
            if "</init>" in line or "<event>" in line:
                break
        ev_file.close()

        # IDBMUP(1),IDBMUP(2),EBMUP(1),EBMUP(2), PDFGUP(1),PDFGUP(2),
        # PDFSUP(1),PDFSUP(2),IDWTUP,NPRUP
        # these are not included (so far) in the init_dict
        # XSECUP(1),XERRUP(1),XMAXUP(1),LPRUP(1)

        vals = init.split()
        init_dict = {}
        init_dict['idbmup1'] = int(vals[0])
        init_dict['idbmup2'] = int(vals[1])
        init_dict['ebmup1'] = float(vals[2])
        init_dict['ebmup2'] = float(vals[3])
        init_dict['pdfgup1'] = int(vals[4])
        init_dict['pdfgup2'] = int(vals[5])
        init_dict['pdfsup1'] = int(vals[6])
        init_dict['pdfsup2'] = int(vals[7])
        init_dict['idwtup'] = int(vals[8])
        init_dict['nprup'] = int(vals[9])

        return init_dict
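
The ten numbers read here are the first line of a standard LHE <init> block: IDBMUP(1,2), EBMUP(1,2), PDFGUP(1,2), PDFSUP(1,2), IDWTUP and NPRUP. A self-contained sketch on a made-up block (the values are illustrative only, not from a real run):

    init = ("2212 2212 6500.0 6500.0 0 0 247000 247000 -4 1\n"
            "1.0e2 1.0e0 1.0e2 1\n")
    vals = init.split()
    init_dict = dict(zip(
        ['idbmup1', 'idbmup2', 'ebmup1', 'ebmup2', 'pdfgup1', 'pdfgup2',
         'pdfsup1', 'pdfsup2', 'idwtup', 'nprup'],
        [int(vals[0]), int(vals[1]), float(vals[2]), float(vals[3])] +
        [int(v) for v in vals[4:10]]))
    # e.g. two proton beams (PDG id 2212) at 6500 GeV each, PDF set 247000
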
    def banner_to_mcatnlo(self, evt_file):
        """creates the mcatnlo input script using the values set in the header of the event_file.
        It also checks if the lhapdf library is used"""

        shower = self.banner.get('run_card', 'parton_shower').upper()
        pdlabel = self.banner.get('run_card', 'pdlabel')
        itry = 0
        nevents = self.shower_card['nevents']
        init_dict = self.get_init_dict(evt_file)

        if nevents < 0 or \
           nevents > self.banner.get_detail('run_card', 'nevents'):
            nevents = self.banner.get_detail('run_card', 'nevents')

        # integer division: whole number of events per split job
        nevents = nevents // self.shower_card['nsplit_jobs']

        mcmass_dict = {}
        for line in [l for l in self.banner['montecarlomasses'].split('\n') if l]:
            pdg = int(line.split()[0])
            mass = float(line.split()[1])
            mcmass_dict[pdg] = mass

        content = 'EVPREFIX=%s\n' % os.path.basename(evt_file)
        content += 'NEVENTS=%d\n' % nevents
        content += 'NEVENTS_TOT=%d\n' % (self.banner.get_detail('run_card', 'nevents') //
                                         self.shower_card['nsplit_jobs'])
        content += 'MCMODE=%s\n' % shower
        content += 'PDLABEL=%s\n' % pdlabel

        try:
            aewm1 = self.banner.get_detail('param_card', 'sminputs', 1).value
            # always fall through to the model-reader computation below
            raise KeyError
        except KeyError:
            mod = self.get_model()
            if not hasattr(mod, 'parameter_dict'):
                from models import model_reader
                mod = model_reader.ModelReader(mod)
                mod.set_parameters_and_couplings(self.banner.param_card)
            aewm1 = 0
            for key in ['aEWM1', 'AEWM1', 'aEWm1', 'aewm1']:
                if key in mod['parameter_dict']:
                    aewm1 = mod['parameter_dict'][key]
                    break
                elif 'mdl_%s' % key in mod['parameter_dict']:
                    aewm1 = mod['parameter_dict']['mdl_%s' % key]
                    break
            else:
                for key in ['aEW', 'AEW', 'aEw', 'aew']:
                    if key in mod['parameter_dict']:
                        aewm1 = 1./mod['parameter_dict'][key]
                        break
                    elif 'mdl_%s' % key in mod['parameter_dict']:
                        aewm1 = 1./mod['parameter_dict']['mdl_%s' % key]
                        break

        content += 'ALPHAEW=%s\n' % aewm1
        #content += 'PDFSET=%s\n' % self.banner.get_detail('run_card', 'lhaid')
        #content += 'PDFSET=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']])
        content += 'TMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 6).value
        content += 'TWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 6).value
        content += 'ZMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 23).value
        content += 'ZWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 23).value
        content += 'WMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 24).value
        content += 'WWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 24).value
        try:
            content += 'HGGMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 25).value
            content += 'HGGWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 25).value
        except KeyError:
            content += 'HGGMASS=120.\n'
            content += 'HGGWIDTH=0.00575308848\n'
        content += 'beammom1=%s\n' % self.banner.get_detail('run_card', 'ebeam1')
        content += 'beammom2=%s\n' % self.banner.get_detail('run_card', 'ebeam2')
        content += 'BEAM1=%s\n' % self.banner.get_detail('run_card', 'lpp1')
        content += 'BEAM2=%s\n' % self.banner.get_detail('run_card', 'lpp2')
        content += 'DMASS=%s\n' % mcmass_dict[1]
        content += 'UMASS=%s\n' % mcmass_dict[2]
        content += 'SMASS=%s\n' % mcmass_dict[3]
        content += 'CMASS=%s\n' % mcmass_dict[4]
        content += 'BMASS=%s\n' % mcmass_dict[5]
        try:
            content += 'EMASS=%s\n' % mcmass_dict[11]
            content += 'MUMASS=%s\n' % mcmass_dict[13]
            content += 'TAUMASS=%s\n' % mcmass_dict[15]
        except KeyError:
            # this is for backward compatibility
            mcmass_lines = [l for l in \
                    open(pjoin(self.me_dir, 'SubProcesses', 'MCmasses_%s.inc' % shower.upper())
                         ).read().split('\n') if l]
            new_mcmass_dict = {}
            for l in mcmass_lines:
                key, val = l.split('=')
                new_mcmass_dict[key.strip()] = val.replace('d', 'e').strip()
            content += 'EMASS=%s\n' % new_mcmass_dict['mcmass(11)']
            content += 'MUMASS=%s\n' % new_mcmass_dict['mcmass(13)']
            content += 'TAUMASS=%s\n' % new_mcmass_dict['mcmass(15)']

        content += 'GMASS=%s\n' % mcmass_dict[21]
        content += 'EVENT_NORM=%s\n' % self.banner.get_detail('run_card', 'event_norm').lower()
        # check if we need to link lhapdf
        if int(self.shower_card['pdfcode']) > 1 or \
           (pdlabel == 'lhapdf' and int(self.shower_card['pdfcode']) == 1) or \
           shower == 'HERWIGPP':
            # Use LHAPDF (should be correctly installed, because
            # either events were already generated with it, or the
            # user explicitly gives an LHAPDF number in the
            # shower_card).
            self.link_lhapdf(pjoin(self.me_dir, 'lib'))
            lhapdfpath = subprocess.Popen([self.options['lhapdf'], '--prefix'],
                                          stdout=subprocess.PIPE).stdout.read().decode().strip()
            content += 'LHAPDFPATH=%s\n' % lhapdfpath
            pdfsetsdir = self.get_lhapdf_pdfsetsdir()
            if self.shower_card['pdfcode'] == 0:
                lhaid_list = ''
                content += ''
            elif self.shower_card['pdfcode'] == 1:
                lhaid_list = [max([init_dict['pdfsup1'], init_dict['pdfsup2']])]
                content += 'PDFCODE=%s\n' % max([init_dict['pdfsup1'], init_dict['pdfsup2']])
            else:
                lhaid_list = [abs(int(self.shower_card['pdfcode']))]
                content += 'PDFCODE=%s\n' % self.shower_card['pdfcode']
            self.copy_lhapdf_set(lhaid_list, pdfsetsdir)
        elif int(self.shower_card['pdfcode']) in (1, -1):
            # Try to use LHAPDF because the user wants to use the same PDF
            # as was used for the event generation. However, for the
            # event generation, LHAPDF was not used, so it is non-trivial to
            # see if LHAPDF is available with the corresponding PDF
            # set. If not found, give a warning and use the built-in PDF
            # set instead.
            try:
                lhapdfpath = subprocess.Popen([self.options['lhapdf'], '--prefix'],
                                              stdout=subprocess.PIPE).stdout.read().decode().strip()
                self.link_lhapdf(pjoin(self.me_dir, 'lib'))
                content += 'LHAPDFPATH=%s\n' % lhapdfpath
                pdfsetsdir = self.get_lhapdf_pdfsetsdir()
                lhaid_list = [max([init_dict['pdfsup1'], init_dict['pdfsup2']])]
                content += 'PDFCODE=%s\n' % max([init_dict['pdfsup1'], init_dict['pdfsup2']])
                self.copy_lhapdf_set(lhaid_list, pdfsetsdir)
            except Exception:
                logger.warning('Trying to shower events using the same PDF in the shower as used in the generation'+\
                               ' of the events using LHAPDF. However, no valid LHAPDF installation found with the'+\
                               ' needed PDF set. Will use the default internal PDF for the shower instead. To use the'+\
                               ' same set as was used in the event generation install LHAPDF and set the path using'+\
                               ' "set /path_to_lhapdf/bin/lhapdf-config" from the MadGraph5_aMC@NLO python shell')
                content += 'LHAPDFPATH=\n'
                content += 'PDFCODE=0\n'
        else:
            content += 'LHAPDFPATH=\n'
            content += 'PDFCODE=0\n'

        content += 'ICKKW=%s\n' % self.banner.get_detail('run_card', 'ickkw')
        content += 'PTJCUT=%s\n' % self.banner.get_detail('run_card', 'ptj')
        # add the pythia8/hwpp path(s)
        if self.options['pythia8_path']:
            content += 'PY8PATH=%s\n' % self.options['pythia8_path']
        if self.options['hwpp_path']:
            content += 'HWPPPATH=%s\n' % self.options['hwpp_path']
        if self.options['thepeg_path'] and self.options['thepeg_path'] != self.options['hwpp_path']:
            content += 'THEPEGPATH=%s\n' % self.options['thepeg_path']
        if self.options['hepmc_path'] and self.options['hepmc_path'] != self.options['hwpp_path']:
            content += 'HEPMCPATH=%s\n' % self.options['hepmc_path']

        output = open(pjoin(self.me_dir, 'MCatNLO', 'banner.dat'), 'w')
        output.write(content)
        output.close()
        return shower
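
The PDFCODE block above implements a three-way choice. A condensed sketch of that decision with toy inputs (local names only; the real code also forces the LHAPDF branch for HERWIGPP and handles the fallback when no LHAPDF installation is found):

    shower_pdfcode = 1           # from the shower_card
    pdlabel = 'lhapdf'           # from the run_card
    pdfsup = (247000, 247000)    # pdfsup1/2 as read from the <init> block

    if shower_pdfcode > 1 or (pdlabel == 'lhapdf' and shower_pdfcode == 1):
        pdfcode_line = 'PDFCODE=%s\n' % max(pdfsup)  # reuse the generation PDF
    elif shower_pdfcode in (1, -1):
        pdfcode_line = 'PDFCODE=%s\n' % max(pdfsup)  # try LHAPDF, fall back if absent
    else:
        pdfcode_line = 'PDFCODE=0\n'                 # shower's internal default PDF
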
    def run_reweight(self, only):
        """runs the reweight_xsec_events executables on each sub-event file generated
        to compute on the fly scale and/or PDF uncertainties"""
        logger.info(' Doing reweight')

        nev_unw = pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted')
        # if only doing reweight, copy back the nevents_unweighted file
        if only:
            if os.path.exists(nev_unw + '.orig'):
                files.cp(nev_unw + '.orig', nev_unw)
            else:
                raise aMCatNLOError('Cannot find event file information')

        # read the nevents_unweighted file to get the list of event files
        file = open(nev_unw)
        lines = file.read().split('\n')
        file.close()
        # make a copy of the original nevents_unweighted file
        files.cp(nev_unw, nev_unw + '.orig')
        # loop over lines (all but the last one, which is empty) and check that the
        # number of events is not 0
        evt_files = [line.split()[0] for line in lines[:-1] if line.split()[1] != '0']
        evt_wghts = [float(line.split()[3]) for line in lines[:-1] if line.split()[1] != '0']
        if self.run_card['event_norm'].lower() == 'bias' and self.run_card['nevents'] != 0:
            evt_wghts[:] = [1./float(self.run_card['nevents']) for wgt in evt_wghts]
        # prepare the job_dict
        job_dict = {}
        exe = 'reweight_xsec_events.local'
        for i, evt_file in enumerate(evt_files):
            path, evt = os.path.split(evt_file)
            files.ln(pjoin(self.me_dir, 'SubProcesses', exe), \
                     pjoin(self.me_dir, 'SubProcesses', path))
            job_dict[path] = [exe]

        self.run_all(job_dict, [[evt, '1']], 'Running reweight')

        # check that the new event files are complete
        for evt_file in evt_files:
            last_line = subprocess.Popen(['tail', '-n1', '%s.rwgt' % \
                    pjoin(self.me_dir, 'SubProcesses', evt_file)], \
                    stdout=subprocess.PIPE).stdout.read().decode().strip()
            if last_line != "</LesHouchesEvents>":
                raise aMCatNLOError('An error occurred during reweighting. Check the' + \
                        '\'reweight_xsec_events.output\' files inside the ' + \
                        '\'SubProcesses/P*/G*/\' directories for details')

        # update file names in nevents_unweighted
        newfile = open(nev_unw, 'w')
        for line in lines:
            if line:
                newfile.write(line.replace(line.split()[0], line.split()[0] + '.rwgt') + '\n')
        newfile.close()
        return self.pdf_scale_from_reweighting(evt_files, evt_wghts)
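
The integrity test above relies on a complete LHE file always ending with its closing tag. The interface shells out to tail; an equivalent pure-Python check, as an editor's sketch (it reads only the last few kB of the file):

    def lhe_is_complete(path, tailsize=4096):
        # True if the last non-empty line is the LHE closing tag
        with open(path, 'rb') as f:
            f.seek(0, 2)                          # jump to end of file
            f.seek(max(0, f.tell() - tailsize))   # back up by at most tailsize
            lines = [l.strip() for l in f.read().splitlines() if l.strip()]
        return bool(lines) and lines[-1] == b'</LesHouchesEvents>'
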
    def pdf_scale_from_reweighting(self, evt_files, evt_wghts):
        """This function takes the files with the scale and pdf values
        written by the reweight_xsec_events.f code
        (P*/G*/scale_pdf_dependence.dat) and computes the overall
        scale and PDF uncertainty (the latter is computed using the
        Hessian method (if lhaid<90000) or Gaussian (if lhaid>90000))
        and returns it in percent. The expected format of the file
        is: n_scales xsec_scale_central xsec_scale1 ... n_pdf
        xsec_pdf0 xsec_pdf1 ..."""

        scales = []
        pdfs = []
        for i, evt_file in enumerate(evt_files):
            path, evt = os.path.split(evt_file)
            with open(pjoin(self.me_dir, 'SubProcesses', path, 'scale_pdf_dependence.dat'), 'r') as f:
                data_line = f.readline()
                if "scale variations:" in data_line:
                    for j, scale in enumerate(self.run_card['dynamical_scale_choice']):
                        data_line = f.readline().split()
                        scales_this = [float(val)*evt_wghts[i] for val in f.readline().replace("D", "E").split()]
                        try:
                            scales[j] = [a + b for a, b in zip(scales[j], scales_this)]
                        except IndexError:
                            scales += [scales_this]
                    data_line = f.readline()
                if "pdf variations:" in data_line:
                    for j, pdf in enumerate(self.run_card['lhaid']):
                        data_line = f.readline().split()
                        pdfs_this = [float(val)*evt_wghts[i] for val in f.readline().replace("D", "E").split()]
                        try:
                            pdfs[j] = [a + b for a, b in zip(pdfs[j], pdfs_this)]
                        except IndexError:
                            pdfs += [pdfs_this]

        # get the scale uncertainty in percent
        scale_info = []
        for j, scale in enumerate(scales):
            s_cen = scale[0]
            if s_cen != 0.0 and self.run_card['reweight_scale'][j]:
                # max and min of the full envelope
                s_max = (max(scale)/s_cen - 1)*100
                s_min = (1 - min(scale)/s_cen)*100
                # ren and fac scale dependence added in quadrature
                ren_var = []
                fac_var = []
                for i in range(len(self.run_card['rw_rscale'])):
                    ren_var.append(scale[i] - s_cen) # central fac scale
                for i in range(len(self.run_card['rw_fscale'])):
                    fac_var.append(scale[i*len(self.run_card['rw_rscale'])] - s_cen) # central ren scale
                s_max_q = ((s_cen + math.sqrt(math.pow(max(ren_var), 2) + math.pow(max(fac_var), 2)))/s_cen - 1)*100
                s_min_q = (1 - (s_cen - math.sqrt(math.pow(min(ren_var), 2) + math.pow(min(fac_var), 2)))/s_cen)*100
                s_size = len(scale)
            else:
                s_max = 0.0
                s_min = 0.0
                s_max_q = 0.0
                s_min_q = 0.0
                s_size = len(scale)
            scale_info.append({'cen': s_cen, 'min': s_min, 'max': s_max,
                               'min_q': s_min_q, 'max_q': s_max_q, 'size': s_size,
                               'label': self.run_card['dynamical_scale_choice'][j],
                               'unc': self.run_card['reweight_scale'][j]})

        # check if we can use LHAPDF to compute the PDF uncertainty
        if any(self.run_card['reweight_pdf']):
            lhapdf = misc.import_python_lhapdf(self.options['lhapdf'])
            if lhapdf:
                use_lhapdf = True
            else:
                logger.warning("Failed to access python version of LHAPDF: "\
                               "cannot compute PDF uncertainty from the "\
                               "weights in the events. The weights in the LHE "\
                               "event files will still cover all PDF set members, "\
                               "but there will be no PDF uncertainty printed in the run summary. \n "\
                               "If the python interface to LHAPDF is available on your system, try "\
                               "adding its location to the PYTHONPATH environment variable and the "\
                               "LHAPDF library location to LD_LIBRARY_PATH (linux) or DYLD_LIBRARY_PATH (mac os x).")
                use_lhapdf = False

        # turn off lhapdf printing any messages
        if any(self.run_card['reweight_pdf']) and use_lhapdf:
            lhapdf.setVerbosity(0)

        pdf_info = []
        for j, pdfset in enumerate(pdfs):
            p_cen = pdfset[0]
            if p_cen != 0.0 and self.run_card['reweight_pdf'][j]:
                if use_lhapdf:
                    pdfsetname = self.run_card['lhapdfsetname'][j]
                    try:
                        p = lhapdf.getPDFSet(pdfsetname)
                        ep = p.uncertainty(pdfset, -1)
                        p_cen = ep.central
                        p_min = abs(ep.errminus/p_cen)*100
                        p_max = abs(ep.errplus/p_cen)*100
                        p_type = p.errorType
                        p_size = p.size
                        p_conf = p.errorConfLevel
                    except:
                        logger.warning("Could not access LHAPDF to compute uncertainties for %s" % pdfsetname)
                        p_min = 0.0
                        p_max = 0.0
                        p_type = 'unknown'
                        p_conf = 'unknown'
                        p_size = len(pdfset)
                else:
                    p_min = 0.0
                    p_max = 0.0
                    p_type = 'unknown'
                    p_conf = 'unknown'
                    p_size = len(pdfset)
                    pdfsetname = self.run_card['lhaid'][j]
            else:
                p_min = 0.0
                p_max = 0.0
                p_type = 'none'
                p_conf = 'unknown'
                p_size = len(pdfset)
                pdfsetname = self.run_card['lhaid'][j]
            pdf_info.append({'cen': p_cen, 'min': p_min, 'max': p_max,
                             'unc': p_type, 'name': pdfsetname, 'size': p_size,
                             'label': self.run_card['lhaid'][j], 'conf': p_conf})

        scale_pdf_info = [scale_info, pdf_info]
        return scale_pdf_info
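
Concretely, for one dynamical-scale choice the code reports both the full envelope and the quadrature combination, in percent of the central value. A worked sketch with toy numbers for a 3x3 grid (mu_F outer, mu_R inner, index 0 is the central point):

    import math

    scale = [10.0, 10.8, 9.4,   # central mu_F, three mu_R variations
             10.5, 11.2, 9.9,   # mu_F varied up
              9.6, 10.2, 9.0]   # mu_F varied down
    s_cen = scale[0]
    s_max = (max(scale)/s_cen - 1)*100                 # +12.0 (envelope up)
    s_min = (1 - min(scale)/s_cen)*100                 # -10.0 (envelope down)
    ren_var = [scale[i] - s_cen for i in range(3)]     # central mu_F row
    fac_var = [scale[3*i] - s_cen for i in range(3)]   # central mu_R column
    s_max_q = math.sqrt(max(ren_var)**2 + max(fac_var)**2)/s_cen*100   # ~9.4
    s_min_q = math.sqrt(min(ren_var)**2 + min(fac_var)**2)/s_cen*100   # ~7.2
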
4827 - def wait_for_complete(self, run_type):
4828 """this function waits for jobs on cluster to complete their run.""" 4829 starttime = time.time() 4830 #logger.info(' Waiting for submitted jobs to complete') 4831 update_status = lambda i, r, f: self.update_status((i, r, f, run_type), 4832 starttime=starttime, level='parton', update_results=True) 4833 try: 4834 self.cluster.wait(self.me_dir, update_status) 4835 except: 4836 self.cluster.remove() 4837 raise
4839 - def run_all(self, job_dict, arg_list, run_type='monitor', split_jobs = False):
4840 """runs the jobs in job_dict (organized as folder: [job_list]), with arguments args""" 4841 self.ijob = 0 4842 if run_type != 'shower': 4843 self.njobs = sum(len(jobs) for jobs in job_dict.values()) * len(arg_list) 4844 for args in arg_list: 4845 for Pdir, jobs in job_dict.items(): 4846 for job in jobs: 4847 self.run_exe(job, args, run_type, cwd=pjoin(self.me_dir, 'SubProcesses', Pdir) ) 4848 if self.cluster_mode == 2: 4849 time.sleep(1) # security to allow all jobs to be launched 4850 else: 4851 self.njobs = len(arg_list) 4852 for args in arg_list: 4853 [(cwd, exe)] = list(job_dict.items()) 4854 self.run_exe(exe, args, run_type, cwd) 4855 4856 self.wait_for_complete(run_type)
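
The bookkeeping in run_all is easiest to see on a toy job_dict: every executable in every folder is launched once per argument list, so njobs is the product shown below (folder and executable names are illustrative):

    job_dict = {'P0_gg_ttx': ['ajob1', 'ajob2'],
                'P0_qq_ttx': ['ajob1']}
    arg_list = [['0', 'F', '0', '1'], ['0', 'B', '0', '1']]

    njobs = sum(len(jobs) for jobs in job_dict.values()) * len(arg_list)  # 6
    for args in arg_list:
        for pdir, jobs in job_dict.items():
            for job in jobs:
                pass  # run_exe(job, args, ...) is called here, once per job
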
4860 - def check_event_files(self,jobs):
4861 """check the integrity of the event files after splitting, and resubmit 4862 those which are not nicely terminated""" 4863 jobs_to_resubmit = [] 4864 for job in jobs: 4865 last_line = '' 4866 try: 4867 last_line = subprocess.Popen( 4868 ['tail', '-n1', pjoin(job['dirname'], 'events.lhe')], \ 4869 stdout = subprocess.PIPE).stdout.read().decode().strip() 4870 except IOError: 4871 pass 4872 if last_line != "</LesHouchesEvents>": 4873 jobs_to_resubmit.append(job) 4874 self.njobs = 0 4875 if jobs_to_resubmit: 4876 run_type = 'Resubmitting broken jobs' 4877 logger.info('Some event files are broken, corresponding jobs will be resubmitted.') 4878 for job in jobs_to_resubmit: 4879 logger.debug('Resubmitting ' + job['dirname'] + '\n') 4880 self.run_all_jobs(jobs_to_resubmit,2,fixed_order=False)
    def find_jobs_to_split(self, pdir, job, arg):
        """looks into the nevents_unweighted_splitted file to check how many
        split jobs are needed for this (pdir, job). arg is F, B or V"""
        # find the number of the integration channel
        splittings = []
        ajob = open(pjoin(self.me_dir, 'SubProcesses', pdir, job)).read()
        pattern = re.compile(r'for i in (\d+) ; do')
        match = re.search(pattern, ajob)
        channel = match.groups()[0]
        # then open the nevents_unweighted_splitted file and look for the
        # number of splittings to be done
        nevents_file = open(pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted_splitted')).read()
        # This skips the channels with zero events, because they are
        # not of the form GFXX_YY, but simply GFXX
        pattern = re.compile(r"%s_(\d+)/events.lhe" % \
                             pjoin(pdir, 'G%s%s' % (arg, channel)))
        matches = re.findall(pattern, nevents_file)
        for m in matches:
            splittings.append(m)
        return splittings
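
Both regular expressions can be exercised on made-up inputs (the file contents below are illustrative; the directory naming G<arg><channel>_<N> is the one the comment above describes):

    import re

    ajob = "for i in 12 ; do\n  ./madevent_mintMC < input_$i ;\ndone\n"
    channel = re.search(r'for i in (\d+) ; do', ajob).group(1)        # '12'

    nevents_file = ("P0_gg_ttx/GF12_1/events.lhe 500\n"
                    "P0_gg_ttx/GF12_2/events.lhe 500\n"
                    "P0_gg_ttx/GF7/events.lhe 0\n")
    splittings = re.findall(r"P0_gg_ttx/GF%s_(\d+)/events.lhe" % channel,
                            nevents_file)                             # ['1', '2']
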
    def run_exe(self, exe, args, run_type, cwd=None):
        """this basic function launches exe locally/on the cluster with args as arguments."""
        # first test that exe exists:
        execpath = None
        if cwd and os.path.exists(pjoin(cwd, exe)):
            execpath = pjoin(cwd, exe)
        elif not cwd and os.path.exists(exe):
            execpath = exe
        else:
            raise aMCatNLOError('Cannot find executable %s in %s' \
                    % (exe, os.getcwd()))
        # check that the executable has exec permissions
        if self.cluster_mode == 1 and not os.access(execpath, os.X_OK):
            subprocess.call(['chmod', '+x', exe], cwd=cwd)
        # finally run it
        if self.cluster_mode == 0:
            # this is for the serial run
            misc.call(['./' + exe] + args, cwd=cwd)
            self.ijob += 1
            self.update_status((max([self.njobs - self.ijob - 1, 0]),
                                min([1, self.njobs - self.ijob]),
                                self.ijob, run_type), level='parton')

        # this is for the cluster/multicore run
        elif 'reweight' in exe:
            # a reweight run
            # Find the correct PDF input file
            input_files, output_files = [], []
            pdfinput = self.get_pdf_input_filename()
            if os.path.exists(pdfinput):
                input_files.append(pdfinput)
            input_files.append(pjoin(os.path.dirname(exe), os.path.pardir, 'reweight_xsec_events'))
            input_files.append(pjoin(cwd, os.path.pardir, 'leshouche_info.dat'))
            input_files.append(args[0])
            output_files.append('%s.rwgt' % os.path.basename(args[0]))
            output_files.append('reweight_xsec_events.output')
            output_files.append('scale_pdf_dependence.dat')

            return self.cluster.submit2(exe, args, cwd=cwd,
                    input_files=input_files, output_files=output_files,
                    required_output=output_files)

        elif 'ajob' in exe:
            # the 'standard' amcatnlo job
            # check if args is a list of strings
            if type(args[0]) == str:
                input_files, output_files, required_output, args = self.getIO_ajob(exe, cwd, args)
                # submitting
                self.cluster.submit2(exe, args, cwd=cwd,
                        input_files=input_files, output_files=output_files,
                        required_output=required_output)

#                # keep track of folders and arguments for splitted evt gen
#                subfolder = output_files[-1].split('/')[0]
#                if len(args) == 4 and '_' in subfolder:
#                    self.split_folders[pjoin(cwd, subfolder)] = [exe] + args

        elif 'shower' in exe:
            # a shower job
            # args are [shower, output(HEP or TOP), run_name]
            # cwd is the shower rundir, where the executables are found
            input_files, output_files = [], []
            shower = args[0]
            # the input files
            if shower == 'PYTHIA8':
                input_files.append(pjoin(cwd, 'Pythia8.exe'))
                input_files.append(pjoin(cwd, 'Pythia8.cmd'))
                if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')):
                    input_files.append(pjoin(cwd, 'config.sh'))
                    input_files.append(pjoin(self.options['pythia8_path'], 'xmldoc'))
                else:
                    input_files.append(pjoin(self.options['pythia8_path'], 'share/Pythia8/xmldoc'))
            else:
                input_files.append(pjoin(cwd, 'MCATNLO_%s_EXE' % shower))
                input_files.append(pjoin(cwd, 'MCATNLO_%s_input' % shower))
                if shower == 'HERWIGPP':
                    if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++')):
                        input_files.append(pjoin(cwd, 'Herwig++'))
                    if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig')):
                        input_files.append(pjoin(cwd, 'Herwig'))
                    input_files.append(pjoin(cwd, 'HepMCFortran.so'))
            if len(args) == 3:
                if os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')):
                    input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz'))
                elif os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')):
                    input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe'))
                else:
                    raise aMCatNLOError('Event file not present in %s' % \
                            pjoin(self.me_dir, 'Events', self.run_name))
            else:
                input_files.append(pjoin(cwd, 'events_%s.lhe' % args[3]))
            # the output files
            if len(args) == 3:
                output_files.append('mcatnlo_run.log')
            else:
                output_files.append('mcatnlo_run_%s.log' % args[3])
            if args[1] == 'HEP':
                if len(args) == 3:
                    fname = 'events'
                else:
                    fname = 'events_%s' % args[3]
                if shower in ['PYTHIA8', 'HERWIGPP']:
                    output_files.append(fname + '.hepmc.gz')
                else:
                    output_files.append(fname + '.hep.gz')
            elif args[1] == 'TOP' or args[1] == 'HWU':
                if len(args) == 3:
                    fname = 'histfile'
                else:
                    fname = 'histfile_%s' % args[3]
                output_files.append(fname + '.tar')
            else:
                raise aMCatNLOError('Not a valid output argument for shower job : %s' % args[1])
            # submitting
            self.cluster.submit2(exe, args, cwd=cwd,
                    input_files=input_files, output_files=output_files)

        else:
            return self.cluster.submit(exe, args, cwd=cwd)
    def getIO_ajob(self, exe, cwd, args):
        # use the local disk of the node if possible => need to understand
        # what the input/output files are

        output_files = []
        required_output = []
        input_files = [pjoin(self.me_dir, 'SubProcesses', 'randinit'),
                       pjoin(cwd, 'symfact.dat'),
                       pjoin(cwd, 'iproc.dat'),
                       pjoin(cwd, 'initial_states_map.dat'),
                       pjoin(cwd, 'configs_and_props_info.dat'),
                       pjoin(cwd, 'leshouche_info.dat'),
                       pjoin(cwd, 'FKS_params.dat')]

        # For the GoSam interface, we must copy the SLHA card as well
        if os.path.exists(pjoin(self.me_dir, 'OLP_virtuals', 'gosam.rc')):
            input_files.append(pjoin(self.me_dir, 'Cards', 'param_card.dat'))

        if os.path.exists(pjoin(cwd, 'nevents.tar')):
            input_files.append(pjoin(cwd, 'nevents.tar'))

        if os.path.exists(pjoin(self.me_dir, 'SubProcesses', 'OLE_order.olc')):
            input_files.append(pjoin(cwd, 'OLE_order.olc'))

        # File for the loop (might not be present if MadLoop is not used)
        if os.path.exists(pjoin(cwd, 'MadLoop5_resources.tar.gz')) and \
           cluster.need_transfer(self.options):
            input_files.append(pjoin(cwd, 'MadLoop5_resources.tar.gz'))
        elif os.path.exists(pjoin(cwd, 'MadLoop5_resources')) and \
             cluster.need_transfer(self.options):
            tf = tarfile.open(pjoin(cwd, 'MadLoop5_resources.tar.gz'), 'w:gz',
                              dereference=True)
            tf.add(pjoin(cwd, 'MadLoop5_resources'), arcname='MadLoop5_resources')
            tf.close()
            input_files.append(pjoin(cwd, 'MadLoop5_resources.tar.gz'))

        if args[1] == 'born' or args[1] == 'all':
            # MADEVENT MINT FO MODE
            input_files.append(pjoin(cwd, 'madevent_mintFO'))
            if args[2] == '0':
                current = '%s_G%s' % (args[1], args[0])
            else:
                current = '%s_G%s_%s' % (args[1], args[0], args[2])
            if os.path.exists(pjoin(cwd, current)):
                input_files.append(pjoin(cwd, current))
            output_files.append(current)

            required_output.append('%s/results.dat' % current)
            required_output.append('%s/res_%s.dat' % (current, args[3]))
            required_output.append('%s/log_MINT%s.txt' % (current, args[3]))
            required_output.append('%s/mint_grids' % current)
            required_output.append('%s/grid.MC_integer' % current)
            if args[3] != '0':
                required_output.append('%s/scale_pdf_dependence.dat' % current)

        elif args[1] == 'F' or args[1] == 'B':
            # MINTMC MODE
            input_files.append(pjoin(cwd, 'madevent_mintMC'))

            if args[2] == '0':
                current = 'G%s%s' % (args[1], args[0])
            else:
                current = 'G%s%s_%s' % (args[1], args[0], args[2])
            if os.path.exists(pjoin(cwd, current)):
                input_files.append(pjoin(cwd, current))
            output_files.append(current)
            if args[2] > '0':
                # this is for the split event generation
                output_files.append('G%s%s_%s' % (args[1], args[0], args[2]))
                required_output.append('G%s%s_%s/log_MINT%s.txt' % (args[1], args[0], args[2], args[3]))
            else:
                required_output.append('%s/log_MINT%s.txt' % (current, args[3]))
                if args[3] in ['0', '1']:
                    required_output.append('%s/results.dat' % current)
                if args[3] == '1':
                    output_files.append('%s/results.dat' % current)

        else:
            raise aMCatNLOError('not valid arguments: %s' % (', '.join(args)))

        # Find the correct PDF input file
        pdfinput = self.get_pdf_input_filename()
        if os.path.exists(pdfinput):
            input_files.append(pdfinput)
        return input_files, output_files, required_output, args
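
The names assembled above follow fixed patterns: <born|all>_G<channel>[_<split>] in fixed-order mode and G<F|B><channel>[_<split>] in MINTMC mode. A tiny sketch with toy arguments in the same positional layout:

    args = ['3', 'F', '2', '1']   # channel, mode, split id, MINT step
    if args[1] in ('born', 'all'):
        current = '%s_G%s_%s' % (args[1], args[0], args[2]) if args[2] != '0' \
                  else '%s_G%s' % (args[1], args[0])
    else:
        current = 'G%s%s_%s' % (args[1], args[0], args[2]) if args[2] != '0' \
                  else 'G%s%s' % (args[1], args[0])
    # -> 'GF3_2', whose log is expected at 'GF3_2/log_MINT1.txt'
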
    def compile(self, mode, options):
        """compiles aMC@NLO to compute either NLO or NLO matched to shower, as
        specified in mode"""

        os.mkdir(pjoin(self.me_dir, 'Events', self.run_name))

        self.banner.write(pjoin(self.me_dir, 'Events', self.run_name,
                          '%s_%s_banner.txt' % (self.run_name, self.run_tag)))

        self.get_characteristics(pjoin(self.me_dir,
                                       'SubProcesses', 'proc_characteristics'))

        # define a bunch of log files
        amcatnlo_log = pjoin(self.me_dir, 'compile_amcatnlo.log')
        madloop_log = pjoin(self.me_dir, 'compile_madloop.log')
        reweight_log = pjoin(self.me_dir, 'compile_reweight.log')
        test_log = pjoin(self.me_dir, 'test.log')

        # environment variables to be included in make_opts
        self.make_opts_var = {}
        if self.proc_characteristics['has_loops'] and \
           not os.path.exists(pjoin(self.me_dir, 'OLP_virtuals')):
            self.make_opts_var['madloop'] = 'true'

        self.update_status('Compiling the code', level=None, update_results=True)

        libdir = pjoin(self.me_dir, 'lib')
        sourcedir = pjoin(self.me_dir, 'Source')

        # clean files
        files.rm([amcatnlo_log, madloop_log, reweight_log, test_log])
        # define which executable/tests to compile
        if '+' in mode:
            mode = mode.split('+')[0]
        if mode in ['NLO', 'LO']:
            exe = 'madevent_mintFO'
            tests = ['test_ME']
            self.analyse_card.write_card(pjoin(self.me_dir, 'SubProcesses', 'analyse_opts'))
            self.analyse_card.update_FO_extrapaths_ajob(pjoin(self.me_dir, 'SubProcesses', 'ajob_template'))
        elif mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
            exe = 'madevent_mintMC'
            tests = ['test_ME', 'test_MC']
            # write an analyse_opts with a dummy analysis so that compilation goes through
            with open(pjoin(self.me_dir, 'SubProcesses', 'analyse_opts'), 'w') as fsock:
                fsock.write('FO_ANALYSE=analysis_dummy.o dbook.o open_output_files_dummy.o HwU_dummy.o\n')

        # directories in which to compile exe
        p_dirs = [d for d in \
                  open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')).read().split('\n') if d]
        # create param_card.inc and run_card.inc
        self.do_treatcards('', amcatnlo=True, mode=mode)
        # if the --nocompile option is specified, check here that all the
        # executables exist. If they do, return.
        if all([os.path.exists(pjoin(self.me_dir, 'SubProcesses', p_dir, exe)) \
                for p_dir in p_dirs]) and options['nocompile']:
            return

        # rm links to the lhapdf lib / PDFsets if they exist
        if os.path.exists(pjoin(libdir, 'PDFsets')):
            files.rm(pjoin(libdir, 'PDFsets'))

        # read the run_card to find if lhapdf is used or not
        if self.run_card['pdlabel'] == 'lhapdf' and \
           (self.banner.get_detail('run_card', 'lpp1') != 0 or \
            self.banner.get_detail('run_card', 'lpp2') != 0):

            self.link_lhapdf(libdir, [pjoin('SubProcesses', p) for p in p_dirs])
            pdfsetsdir = self.get_lhapdf_pdfsetsdir()
            lhaid_list = self.run_card['lhaid']
            self.copy_lhapdf_set(lhaid_list, pdfsetsdir)

        else:
            if self.run_card['lpp1'] == 1 == self.run_card['lpp2']:
                logger.info('Using built-in libraries for PDFs')

            self.make_opts_var['lhapdf'] = ""

        # read the run_card to find if PineAPPL is used or not
        if self.run_card['pineappl']:
            self.make_opts_var['pineappl'] = 'True'
            # check validity of the PineAPPL installation
            for code in ['pineappl']:
                try:
                    p = subprocess.Popen([self.options[code], '--version'], \
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                except OSError:
                    raise aMCatNLOError(('No valid %s installation found. \n' + \
                            'Please set the path to %s-config by using \n' + \
                            'MG5_aMC> set <absolute-path-to-%s>/bin/%s-config \n') % (code, code, code, code))
                ##else:
                ##    output, _ = p.communicate()
                ##    if code is 'applgrid' and output < '1.4.63':
                ##        raise aMCatNLOError('Version of APPLgrid is too old. Use 1.4.69 or later.'\
                ##                +' You are using %s', output)
        else:
            self.make_opts_var['pineappl'] = ""

        if 'fastjet' in list(self.options.keys()) and self.options['fastjet']:
            self.make_opts_var['fastjet_config'] = self.options['fastjet']

        # add the make_opts_var to make_opts
        self.update_make_opts()

        # make Source
        self.update_status('Compiling source...', level=None)
        misc.compile(['clean4pdf'], cwd=sourcedir)
        misc.compile(cwd=sourcedir)
        if os.path.exists(pjoin(libdir, 'libdhelas.a')) \
           and os.path.exists(pjoin(libdir, 'libgeneric.a')) \
           and os.path.exists(pjoin(libdir, 'libmodel.a')) \
           and os.path.exists(pjoin(libdir, 'libpdf.a')):
            logger.info(' ...done, continuing with P* directories')
        else:
            raise aMCatNLOError('Compilation failed')

        # make StdHEP (only necessary with MG option output_dependencies='internal')
        MCatNLO_libdir = pjoin(self.me_dir, 'MCatNLO', 'lib')
        if not os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libstdhep.a'))) or \
           not os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libFmcfio.a'))):
            if os.path.exists(pjoin(sourcedir, 'StdHEP')):
                logger.info('Compiling StdHEP (can take a couple of minutes) ...')
                try:
                    misc.compile(['StdHEP'], cwd=sourcedir)
                except Exception as error:
                    logger.debug(str(error))
                    logger.warning("StdHEP failed to compile. This prevents running NLO+PS with PY6 and Herwig6")
                    logger.info("details on the compilation error are available if the code is run with the --debug flag")
                else:
                    logger.info(' ...done.')
            else:
                logger.warning('Could not compile StdHEP because its' + \
                        ' source directory could not be found in the SOURCE folder.\n' + \
                        " Check the MG5_aMC option 'output_dependencies'.\n" + \
                        " This will prevent the use of the HERWIG6/Pythia6 shower.")

        # make CutTools (only necessary with MG option output_dependencies='internal')
        if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \
           not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))):
            if os.path.exists(pjoin(sourcedir, 'CutTools')):
                logger.info('Compiling CutTools (can take a couple of minutes) ...')
                misc.compile(['CutTools', '-j1'], cwd=sourcedir, nb_core=1)
                logger.info(' ...done.')
            else:
                raise aMCatNLOError('Could not compile CutTools because its' + \
                        ' source directory could not be found in the SOURCE folder.\n' + \
                        " Check the MG5_aMC option 'output_dependencies'.")
        if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \
           not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))):
            raise aMCatNLOError('CutTools compilation failed.')

        # Verify compatibility between the current compiler and the one which was
        # used when last compiling CutTools (if specified).
        compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin(
                                  libdir, 'libcts.a')))), 'compiler_version.log')
        if os.path.exists(compiler_log_path):
            compiler_version_used = open(compiler_log_path, 'r').read()
            if not str(misc.get_gfortran_version(misc.detect_current_compiler(\
                       pjoin(sourcedir, 'make_opts')))) in compiler_version_used:
                if os.path.exists(pjoin(sourcedir, 'CutTools')):
                    logger.info('CutTools was compiled with a different fortran' + \
                                ' compiler. Re-compiling it now...')
                    misc.compile(['cleanCT'], cwd=sourcedir)
                    misc.compile(['CutTools', '-j1'], cwd=sourcedir, nb_core=1)
                    logger.info(' ...done.')
                else:
                    raise aMCatNLOError("CutTools installation in %s" \
                            % os.path.realpath(pjoin(libdir, 'libcts.a')) + \
                            " seems to have been compiled with a different compiler than" + \
                            " the one specified in MG5_aMC. Please recompile CutTools.")

        # make IREGI (only necessary with MG option output_dependencies='internal')
        if not os.path.exists(os.path.realpath(pjoin(libdir, 'libiregi.a'))) \
           and os.path.exists(pjoin(sourcedir, 'IREGI')):
            logger.info('Compiling IREGI (can take a couple of minutes) ...')
            misc.compile(['IREGI'], cwd=sourcedir)
            logger.info(' ...done.')

        if os.path.exists(pjoin(libdir, 'libiregi.a')):
            # Verify compatibility between the current compiler and the one which was
            # used when last compiling IREGI (if specified).
            compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin(
                                      libdir, 'libiregi.a')))), 'compiler_version.log')
            if os.path.exists(compiler_log_path):
                compiler_version_used = open(compiler_log_path, 'r').read()
                if not str(misc.get_gfortran_version(misc.detect_current_compiler(\
                           pjoin(sourcedir, 'make_opts')))) in compiler_version_used:
                    if os.path.exists(pjoin(sourcedir, 'IREGI')):
                        logger.info('IREGI was compiled with a different fortran' + \
                                    ' compiler. Re-compiling it now...')
                        misc.compile(['cleanIR'], cwd=sourcedir)
                        misc.compile(['IREGI'], cwd=sourcedir)
                        logger.info(' ...done.')
                    else:
                        raise aMCatNLOError("IREGI installation in %s" \
                                % os.path.realpath(pjoin(libdir, 'libiregi.a')) + \
                                " seems to have been compiled with a different compiler than" + \
                                " the one specified in MG5_aMC. Please recompile IREGI.")

        # check if MadLoop virtuals have been generated
        if self.proc_characteristics['has_loops'] and \
           not os.path.exists(pjoin(self.me_dir, 'OLP_virtuals')):
            if mode in ['NLO', 'aMC@NLO', 'noshower']:
                tests.append('check_poles')

        # make and run tests (if asked for), gensym and make madevent in each dir
        self.update_status('Compiling directories...', level=None)

        for test in tests:
            self.write_test_input(test)

        try:
            import multiprocessing
            if not self.nb_core:
                try:
                    self.nb_core = int(self.options['nb_core'])
                except TypeError:
                    self.nb_core = multiprocessing.cpu_count()
        except ImportError:
            self.nb_core = 1

        compile_options = copy.copy(self.options)
        compile_options['nb_core'] = self.nb_core
        compile_cluster = cluster.MultiCore(**compile_options)
        logger.info('Compiling on %d cores' % self.nb_core)

        update_status = lambda i, r, f: self.donothing(i, r, f)
        for p_dir in p_dirs:
            compile_cluster.submit(prog=compile_dir,
                                   argument=[self.me_dir, p_dir, mode, options,
                                             tests, exe, self.options['run_mode']])
        try:
            compile_cluster.wait(self.me_dir, update_status)
        except Exception as error:
            logger.warning("Compilation of the Subprocesses failed")
            if __debug__:
                raise
            compile_cluster.remove()
            self.do_quit('')

        logger.info('Checking test output:')
        for p_dir in p_dirs:
            logger.info(p_dir)
            for test in tests:
                logger.info(' Result for %s:' % test)

                this_dir = pjoin(self.me_dir, 'SubProcesses', p_dir)
                # check that none of the tests failed
                self.check_tests(test, this_dir)
5364 - def donothing(*args):
5365 pass
5368 - def check_tests(self, test, dir):
5369 """just call the correct parser for the test log. 5370 Skip check_poles for LOonly folders""" 5371 if test in ['test_ME', 'test_MC']: 5372 return self.parse_test_mx_log(pjoin(dir, '%s.log' % test)) 5373 elif test == 'check_poles' and not os.path.exists(pjoin(dir,'parton_lum_0.f')): 5374 return self.parse_check_poles_log(pjoin(dir, '%s.log' % test))
5377 - def parse_test_mx_log(self, log):
5378 """read and parse the test_ME/MC.log file""" 5379 content = open(log).read() 5380 if 'FAILED' in content: 5381 logger.info('Output of the failing test:\n'+content[:-1],'$MG:BOLD') 5382 raise aMCatNLOError('Some tests failed, run cannot continue. Please search on https://answers.launchpad.net/mg5amcnlo for more information, and in case there is none, report the problem there.') 5383 else: 5384 lines = [l for l in content.split('\n') if 'PASSED' in l] 5385 logger.info(' Passed.') 5386 logger.debug('\n'+'\n'.join(lines))
    def parse_check_poles_log(self, log):
        """reads and parses the check_poles.log file"""
        content = open(log).read()
        npass = 0
        nfail = 0
        for line in content.split('\n'):
            if 'PASSED' in line:
                npass += 1
                tolerance = float(line.split()[1])
            if 'FAILED' in line:
                nfail += 1
                tolerance = float(line.split()[1])

        if nfail + npass == 0:
            logger.warning('0 points have been tried')
            return

        if float(nfail)/float(nfail + npass) > 0.1:
            raise aMCatNLOError('Poles do not cancel, run cannot continue')
        else:
            logger.info('   Poles successfully cancel for %d points out of %d (tolerance=%2.1e)' \
                    % (npass, nfail + npass, tolerance))
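
In other words, the run is aborted as soon as more than 10% of the probed phase-space points fail the pole-cancellation test. In miniature (the counts are illustrative):

    npass, nfail = 98, 2
    fail_fraction = float(nfail) / float(nfail + npass)   # 0.02
    assert fail_fraction <= 0.1   # within tolerance, so the run continues
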
    def write_test_input(self, test):
        """write the input files to run test_ME/MC or check_poles"""
        if test in ['test_ME', 'test_MC']:
            content = "-2 -2\n" # generate randomly energy/angle
            content += "100 100\n" # run 100 points for soft and collinear tests
            content += "0\n" # all FKS configs
            content += '\n'.join(["-1"] * 50) # random diagram (=first diagram)
        elif test == 'check_poles':
            content = '20 \n -1\n'

        file = open(pjoin(self.me_dir, '%s_input.txt' % test), 'w')
        if test == 'test_MC':
            shower = self.run_card['parton_shower']
            header = "1 \n %s\n 1 -0.1\n-1 -0.1\n" % shower
            file.write(header + content)
        elif test == 'test_ME':
            header = "2 \n"
            file.write(header + content)
        else:
            file.write(content)
        file.close()

    action_switcher = AskRunNLO
    ############################################################################
    def ask_run_configuration(self, mode, options, switch={}):
        """Ask the question when launching generate_events/multi_run"""

        if 'parton' not in options:
            options['parton'] = False
        if 'reweightonly' not in options:
            options['reweightonly'] = False

        if mode == 'auto':
            mode = None
        if not mode and (options['parton'] or options['reweightonly']):
            mode = 'noshower'

        passing_cmd = []
        for key, value in switch.items():
            passing_cmd.append('%s=%s' % (key, value))

        if 'do_reweight' in options and options['do_reweight']:
            passing_cmd.append('reweight=ON')
        if 'do_madspin' in options and options['do_madspin']:
            passing_cmd.append('madspin=ON')

        force = self.force
        if mode == 'onlyshower':
            passing_cmd.append('onlyshower')
            force = True
        elif mode:
            passing_cmd.append(mode)
            ####mode = None # allow to overwrite it due to EW

        switch, cmd_switch = self.ask('', '0', [], ask_class=self.action_switcher,
                                      mode=mode, force=(force or mode),
                                      first_cmd=passing_cmd,
                                      return_instance=True)

        if 'mode' in switch:
            mode = switch['mode']
        # assign the mode depending on the switch
        if not mode or mode == 'auto':
            if switch['order'] == 'LO':
                if switch['runshower']:
                    mode = 'aMC@LO'
                elif switch['fixed_order'] == 'ON':
                    mode = 'LO'
                else:
                    mode = 'noshowerLO'
            elif switch['order'] == 'NLO':
                if switch['runshower']:
                    mode = 'aMC@NLO'
                elif switch['fixed_order'] == 'ON':
                    mode = 'NLO'
                else:
                    mode = 'noshower'
        logger.info('will run in mode: %s' % mode)

        if mode == 'noshower':
            if switch['shower'] == 'OFF':
                logger.warning("""You have chosen not to run a parton shower. NLO events without showering are NOT physical. Please, shower the Les Houches events before using them for physics analyses. You have to choose NOW which parton shower you WILL use and specify it in the run_card.""")
            else:
                logger.info("""Your parton-shower choice is not available for running. Events will be generated for the associated parton shower. Remember that NLO events without showering are NOT physical.""", '$MG:BOLD')

        # specify the cards which are needed for this run.
        cards = ['param_card.dat', 'run_card.dat']
        ignore = []
        if mode in ['LO', 'NLO']:
            options['parton'] = True
            ignore = ['shower_card.dat', 'madspin_card.dat']
            cards.append('FO_analyse_card.dat')
        else:
            if switch['madspin'] != 'OFF':
                cards.append('madspin_card.dat')
            if switch['reweight'] != 'OFF':
                cards.append('reweight_card.dat')
            if switch['madanalysis'] in ['HADRON', 'ON']:
                cards.append('madanalysis5_hadron_card.dat')
        if 'aMC@' in mode:
            cards.append('shower_card.dat')
        if mode == 'onlyshower':
            cards = ['shower_card.dat']
        if options['reweightonly']:
            cards = ['run_card.dat']

        self.keep_cards(cards, ignore)

        if mode == 'onlyshower':
            cards = ['shower_card.dat']

        # automatically switch to keep_wgt option
        first_cmd = cmd_switch.get_cardcmd()

        if not options['force'] and not self.force:
            self.ask_edit_cards(cards, plot=False, first_cmd=first_cmd)

        self.banner = banner_mod.Banner()

        # store the cards in the banner
        for card in cards:
            self.banner.add(pjoin(self.me_dir, 'Cards', card))
        # and the run settings
        run_settings = '\n'.join(['%s = %s' % (k, v) for (k, v) in switch.items()])
        self.banner.add_text('run_settings', run_settings)

        if not mode == 'onlyshower':
            self.run_card = self.banner.charge_card('run_card')
            self.run_tag = self.run_card['run_tag']
            # this is if the user did not provide a name for the current run
            if not hasattr(self, 'run_name') or not self.run_name:
                self.run_name = self.find_available_run_name(self.me_dir)
                # add a tag in the run_name to distinguish the run type
                if self.run_name.startswith('run_'):
                    if mode in ['LO', 'aMC@LO', 'noshowerLO']:
                        self.run_name += '_LO'
            self.set_run_name(self.run_name, self.run_tag, 'parton')
            if self.run_card['ickkw'] == 3 and mode in ['LO', 'aMC@LO', 'noshowerLO']:
                raise self.InvalidCmd("""FxFx merging (ickkw=3) not allowed at LO""")
            elif self.run_card['ickkw'] == 3 and \
                 ((mode in ['aMC@NLO'] and self.run_card['parton_shower'].upper() != 'PYTHIA8') or mode in ['noshower']):
                logger.warning("""You are running with FxFx merging enabled. To be able to merge
    samples of various multiplicities without double counting, you
    have to remove some events after showering 'by hand'. Please
    read http://amcatnlo.cern.ch/FxFx_merging.htm for more details.""")

                if self.run_card['parton_shower'].upper() == 'PYTHIA6Q':
                    raise self.InvalidCmd("""FxFx merging does not work with Pythia6's Q-squared ordered showers.""")
                elif self.run_card['parton_shower'].upper() != 'HERWIG6' and \
                     self.run_card['parton_shower'].upper() != 'PYTHIA8' and \
                     self.run_card['parton_shower'].upper() != 'HERWIGPP':
                    question = "FxFx merging not tested for %s shower. Do you want to continue?\n" % self.run_card['parton_shower'] + \
                               "Type 'n' to stop or 'y' to continue"
                    answers = ['n', 'y']
                    answer = self.ask(question, 'n', answers)
                    if answer == 'n':
                        error = '''Stop operation'''
                        self.ask_run_configuration(mode, options)
                        # raise aMCatNLOError(error)
            elif self.run_card['ickkw'] == -1 and mode in ['aMC@NLO', 'noshower']:
                # NNLL+NLO jet-veto only possible for LO event generation or fNLO runs.
                raise self.InvalidCmd("""NNLL+NLO jet veto runs (ickkw=-1) only possible for fNLO or LO.""")
        if 'aMC@' in mode or mode == 'onlyshower':
            self.shower_card = self.banner.charge_card('shower_card')

        elif mode in ['LO', 'NLO']:
            analyse_card_path = pjoin(self.me_dir, 'Cards', 'FO_analyse_card.dat')
            self.analyse_card = self.banner.charge_card('FO_analyse_card')

        return mode
#===============================================================================
# aMCatNLOCmd
#===============================================================================
class aMCatNLOCmdShell(aMCatNLOCmd, cmd.CmdShell):
5589 """The command line processor of MadGraph"""
5590 5591 _compile_usage = "compile [MODE] [options]\n" + \ 5592 "-- compiles aMC@NLO \n" + \ 5593 " MODE can be either FO, for fixed-order computations, \n" + \ 5594 " or MC for matching with parton-shower monte-carlos. \n" + \ 5595 " (if omitted, it is set to MC)\n" 5596 _compile_parser = misc.OptionParser(usage=_compile_usage) 5597 _compile_parser.add_option("-f", "--force", default=False, action='store_true', 5598 help="Use the card present in the directory for the launch, without editing them") 5599 5600 _launch_usage = "launch [MODE] [options]\n" + \ 5601 "-- execute aMC@NLO \n" + \ 5602 " MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \ 5603 " If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \ 5604 " computation of the total cross section and the filling of parton-level histograms.\n" + \ 5605 " If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a .lhe \n" + \ 5606 " event file is generated which will be showered with the MonteCarlo specified \n" + \ 5607 " in the run_card.dat\n" 5608 5609 _launch_parser = misc.OptionParser(usage=_launch_usage) 5610 _launch_parser.add_option("-f", "--force", default=False, action='store_true', 5611 help="Use the card present in the directory for the launch, without editing them") 5612 _launch_parser.add_option("-c", "--cluster", default=False, action='store_true', 5613 help="Submit the jobs on the cluster") 5614 _launch_parser.add_option("-m", "--multicore", default=False, action='store_true', 5615 help="Submit the jobs on multicore mode") 5616 _launch_parser.add_option("-x", "--nocompile", default=False, action='store_true', 5617 help="Skip compilation. Ignored if no executable is found") 5618 _launch_parser.add_option("-r", "--reweightonly", default=False, action='store_true', 5619 help="Skip integration and event generation, just run reweight on the" + \ 5620 " latest generated event files (see list in SubProcesses/nevents_unweighted)") 5621 _launch_parser.add_option("-p", "--parton", default=False, action='store_true', 5622 help="Stop the run after the parton level file generation (you need " + \ 5623 "to shower the file in order to get physical results)") 5624 _launch_parser.add_option("-o", "--only_generation", default=False, action='store_true', 5625 help="Skip grid set up, just generate events starting from " + \ 5626 "the last available results") 5627 _launch_parser.add_option("-n", "--name", default=False, dest='run_name', 5628 help="Provide a name to the run") 5629 _launch_parser.add_option("-R", "--reweight", default=False, dest='do_reweight', action='store_true', 5630 help="Run the reweight module (reweighting by different model parameters)") 5631 _launch_parser.add_option("-M", "--madspin", default=False, dest='do_madspin', action='store_true', 5632 help="Run the madspin package") 5633 5634 5635 5636 _generate_events_usage = "generate_events [MODE] [options]\n" + \ 5637 "-- execute aMC@NLO \n" + \ 5638 " MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \ 5639 " If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \ 5640 " computation of the total cross section and the filling of parton-level histograms.\n" + \ 5641 " If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a .lhe \n" + \ 5642 " event file is generated which will be showered with the MonteCarlo specified \n" + \ 5643 " in the run_card.dat\n" 5644 5645 
_generate_events_parser = misc.OptionParser(usage=_generate_events_usage) 5646 _generate_events_parser.add_option("-f", "--force", default=False, action='store_true', 5647 help="Use the card present in the directory for the generate_events, without editing them") 5648 _generate_events_parser.add_option("-c", "--cluster", default=False, action='store_true', 5649 help="Submit the jobs on the cluster") 5650 _generate_events_parser.add_option("-m", "--multicore", default=False, action='store_true', 5651 help="Submit the jobs on multicore mode") 5652 _generate_events_parser.add_option("-x", "--nocompile", default=False, action='store_true', 5653 help="Skip compilation. Ignored if no executable is found") 5654 _generate_events_parser.add_option("-r", "--reweightonly", default=False, action='store_true', 5655 help="Skip integration and event generation, just run reweight on the" + \ 5656 " latest generated event files (see list in SubProcesses/nevents_unweighted)") 5657 _generate_events_parser.add_option("-p", "--parton", default=False, action='store_true', 5658 help="Stop the run after the parton level file generation (you need " + \ 5659 "to shower the file in order to get physical results)") 5660 _generate_events_parser.add_option("-o", "--only_generation", default=False, action='store_true', 5661 help="Skip grid set up, just generate events starting from " + \ 5662 "the last available results") 5663 _generate_events_parser.add_option("-n", "--name", default=False, dest='run_name', 5664 help="Provide a name to the run") 5665 5666 5667 5668 _calculate_xsect_usage = "calculate_xsect [ORDER] [options]\n" + \ 5669 "-- calculate cross section up to ORDER.\n" + \ 5670 " ORDER can be either LO or NLO (if omitted, it is set to NLO). \n" 5671 5672 _calculate_xsect_parser = misc.OptionParser(usage=_calculate_xsect_usage) 5673 _calculate_xsect_parser.add_option("-f", "--force", default=False, action='store_true', 5674 help="Use the card present in the directory for the launch, without editing them") 5675 _calculate_xsect_parser.add_option("-c", "--cluster", default=False, action='store_true', 5676 help="Submit the jobs on the cluster") 5677 _calculate_xsect_parser.add_option("-m", "--multicore", default=False, action='store_true', 5678 help="Submit the jobs on multicore mode") 5679 _calculate_xsect_parser.add_option("-x", "--nocompile", default=False, action='store_true', 5680 help="Skip compilation. Ignored if no executable is found") 5681 _calculate_xsect_parser.add_option("-n", "--name", default=False, dest='run_name', 5682 help="Provide a name to the run") 5683 _calculate_xsect_parser.add_option("-o", "--only_generation", default=False, action='store_true', 5684 help="Skip grid set up, just generate events starting from " + \ 5685 "the last available results") 5686 5687 _shower_usage = 'shower run_name [options]\n' + \ 5688 '-- do shower/hadronization on parton-level file generated for run run_name\n' + \ 5689 ' all the information (e.g. number of events, MonteCarlo, ...\n' + \ 5690 ' are directly read from the header of the event file\n' 5691 _shower_parser = misc.OptionParser(usage=_shower_usage) 5692 _shower_parser.add_option("-f", "--force", default=False, action='store_true', 5693 help="Use the shower_card present in the directory for the launch, without editing") 5694 5695 if '__main__' == __name__: 5696 # Launch the interface without any check if one code is already running. 5697 # This can ONLY run a single command !! 
    import sys

    # minor-version check: requires python 2.7 or python 3.7 and later
    if sys.version_info[1] < 7:
        sys.exit('MadGraph5_aMC@NLO works only with python 2.7 or python 3.7 and later.\n' + \
                 'Please upgrade your version of python or specify a compatible version.')

    import os
    import optparse
    # Get the directory of the script real path (bin)
    # and add it to the current PYTHONPATH
    root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    sys.path.insert(0, root_path)
5710 5711 - class MyOptParser(optparse.OptionParser):
5712 - class InvalidOption(Exception): pass
5713 - def error(self, msg=''):
5714 raise MyOptParser.InvalidOption(msg)
    # Write out a nice usage message if called with -h or --help
    usage = "usage: %prog [options] [FILE] "
    parser = MyOptParser(usage=usage)
    parser.add_option("-l", "--logging", default='INFO',
                      help="logging level (DEBUG|INFO|WARNING|ERROR|CRITICAL) [%default]")
    parser.add_option("", "--web", action="store_true", default=False, dest='web', \
                      help='force the code to be in secure mode')
    parser.add_option("", "--debug", action="store_true", default=False, dest='debug', \
                      help='force to launch debug mode')
    parser_error = ''
    done = False

    for i in range(len(sys.argv)-1):
        try:
            (options, args) = parser.parse_args(sys.argv[1:len(sys.argv)-i])
            done = True
        except MyOptParser.InvalidOption as error:
            parser_error = str(error) # remember the failure for the message below
        else:
            args += sys.argv[len(sys.argv)-i:]
            break # stop at the first slice of the command line that parses
    if not done:
        # raise the correct error:
        try:
            (options, args) = parser.parse_args()
        except MyOptParser.InvalidOption as error:
            print(error)
            sys.exit(2)

    if len(args) == 0:
        args = ''

    import subprocess
    import logging
    import logging.config
    # Set logging level according to the logging level given by options
    #logging.basicConfig(level=vars(logging)[options.logging])
    import internal.coloring_logging
    try:
        if __debug__ and options.logging == 'INFO':
            options.logging = 'DEBUG'
        if options.logging.isdigit():
            level = int(options.logging)
        else:
            level = getattr(logging, options.logging)
        print(os.path.join(root_path, 'internal', 'me5_logging.conf'))
        logging.config.fileConfig(os.path.join(root_path, 'internal', 'me5_logging.conf'))
        logging.root.setLevel(level)
        logging.getLogger('madgraph').setLevel(level)
    except:
        raise

    # Call the cmd interface main loop
    try:
        if args:
            # a single command is provided
            if '--web' in args:
                i = args.index('--web')
                args.pop(i)
                cmd_line = aMCatNLOCmd(me_dir=os.path.dirname(root_path), force_run=True)
            else:
                cmd_line = aMCatNLOCmdShell(me_dir=os.path.dirname(root_path), force_run=True)

            if not hasattr(cmd_line, 'do_%s' % args[0]):
                if parser_error:
                    print(parser_error)
                    print('and %s can not be interpreted as a valid command.' % args[0])
                else:
                    print('ERROR: %s not a valid command. Please retry' % args[0])
            else:
                cmd_line.use_rawinput = False
                cmd_line.run_cmd(' '.join(args))
                cmd_line.run_cmd('quit')

    except KeyboardInterrupt:
        print('quit on KeyboardInterrupt')